
[alpine-aports] [PATCH v3.3] main/xen: security upgrade - fixes #6350, #6497

From: Sergey Lukin <sergej.lukin@gmail.com>
Date: Wed, 7 Dec 2016 14:40:15 +0000

CVE-2016-7777
CVE-2016-9386, CVE-2016-9382, CVE-2016-9385, CVE-2016-9383, CVE-2016-9377,
CVE-2016-9378, CVE-2016-9381, CVE-2016-9379, CVE-2016-9380
---
 main/xen/APKBUILD                                  |  12 +-
 main/xen/xsa190-4.6-CVE-2016-7777.patch            | 163 +++++++++++++++++++++
 main/xen/xsa191-4.6-CVE-2016-9386.patch            | 138 +++++++++++++++++
 main/xen/xsa192-CVE-2016-9382.patch                |  64 ++++++++
 main/xen/xsa193-4.7-CVE-2016-9385.patch            |  68 +++++++++
 main/xen/xsa195-CVE-2016-9383.patch                |  45 ++++++
 ...entry-calculation-in-inject-CVE-2016-9377.patch |  61 ++++++++
 ...tion-of-software-interrupts-CVE-2016-9378.patch |  76 ++++++++++
 main/xen/xsa197-4.6-qemuu-CVE-2016-9381.patch      |  63 ++++++++
 main/xen/xsa197-qemut-CVE-2016-9381.patch          |  65 ++++++++
 main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch  |  62 ++++++++
 11 files changed, 816 insertions(+), 1 deletion(-)
 create mode 100644 main/xen/xsa190-4.6-CVE-2016-7777.patch
 create mode 100644 main/xen/xsa191-4.6-CVE-2016-9386.patch
 create mode 100644 main/xen/xsa192-CVE-2016-9382.patch
 create mode 100644 main/xen/xsa193-4.7-CVE-2016-9385.patch
 create mode 100644 main/xen/xsa195-CVE-2016-9383.patch
 create mode 100644 main/xen/xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
 create mode 100644 main/xen/xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
 create mode 100644 main/xen/xsa197-4.6-qemuu-CVE-2016-9381.patch
 create mode 100644 main/xen/xsa197-qemut-CVE-2016-9381.patch
 create mode 100644 main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index bd41f24..66c866d 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
 # Maintainer: William Pitcock <nenolod@dereferenced.org>
 pkgname=xen
 pkgver=4.6.3
-pkgrel=2
+pkgrel=3
 pkgdesc="Xen hypervisor"
 url="http://www.xen.org/"
 arch="x86_64"
@@ -51,6 +51,16 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
 	xsa186-4.6-0002-hvm-fep-Allow-testing-of-instructions-crossing-the.patch
 	xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
 	xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
+	xsa190-4.6-CVE-2016-7777.patch
+	xsa191-4.6-CVE-2016-9386.patch
+	xsa192-CVE-2016-9382.patch
+	xsa193-4.7-CVE-2016-9385.patch
+	xsa195-CVE-2016-9383.patch
+	xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
+	xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
+	xsa197-4.6-qemuu-CVE-2016-9381.patch
+	xsa197-qemut-CVE-2016-9381.patch
+	xsa198-CVE-2016-9379-CVE-2016-9380.patch
 
 	qemu-coroutine-gthread.patch
 	qemu-xen_paths.patch
diff --git a/main/xen/xsa190-4.6-CVE-2016-7777.patch b/main/xen/xsa190-4.6-CVE-2016-7777.patch
new file mode 100644
index 0000000..b950ae9
--- /dev/null
+++ b/main/xen/xsa190-4.6-CVE-2016-7777.patch
@@ -0,0 +1,163 @@
+x86emul: honor guest CR0.TS and CR0.EM
+
+We must not emulate any instructions accessing respective registers
+when either of these flags is set in the guest view of the register, or
+else we may do so on data not belonging to the guest's current task.
+
+Being architecturally required behavior, the logic gets placed in the
+instruction emulator instead of hvmemul_get_fpu(). It should be noted,
+though, that hvmemul_get_fpu() being the only current handler for the
+get_fpu() callback, we don't have an active problem with CR4: Both
+CR4.OSFXSR and CR4.OSXSAVE get handled as necessary by that function.
+
+This is XSA-190.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/tools/tests/x86_emulator/test_x86_emulator.c
++++ b/tools/tests/x86_emulator/test_x86_emulator.c
+@@ -129,6 +129,22 @@ static inline uint64_t xgetbv(uint32_t x
+     (ebx & (1U << 5)) != 0; \
+ })
+ 
++static int read_cr(
++    unsigned int reg,
++    unsigned long *val,
++    struct x86_emulate_ctxt *ctxt)
++{
++    /* Fake just enough state for the emulator's _get_fpu() to be happy. */
++    switch ( reg )
++    {
++    case 0:
++        *val = 0x00000001; /* PE */
++        return X86EMUL_OKAY;
++    }
++
++    return X86EMUL_UNHANDLEABLE;
++}
++
+ int get_fpu(
+     void (*exception_callback)(void *, struct cpu_user_regs *),
+     void *exception_callback_arg,
+@@ -160,6 +176,7 @@ static struct x86_emulate_ops emulops =
+     .write      = write,
+     .cmpxchg    = cmpxchg,
+     .cpuid      = cpuid,
++    .read_cr    = read_cr,
+     .get_fpu    = get_fpu,
+ };
+ 
+--- a/xen/arch/x86/hvm/emulate.c
++++ b/xen/arch/x86/hvm/emulate.c
+@@ -1557,6 +1557,7 @@ static int hvmemul_get_fpu(
+     switch ( type )
+     {
+     case X86EMUL_FPU_fpu:
++    case X86EMUL_FPU_wait:
+         break;
+     case X86EMUL_FPU_mmx:
+         if ( !cpu_has_mmx )
+@@ -1564,7 +1565,6 @@ static int hvmemul_get_fpu(
+         break;
+     case X86EMUL_FPU_xmm:
+         if ( !cpu_has_xmm ||
+-             (curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_EM) ||
+              !(curr->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSFXSR) )
+             return X86EMUL_UNHANDLEABLE;
+         break;
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -366,6 +366,9 @@ typedef union {
+ 
+ /* Control register flags. */
+ #define CR0_PE    (1<<0)
++#define CR0_MP    (1<<1)
++#define CR0_EM    (1<<2)
++#define CR0_TS    (1<<3)
+ #define CR4_TSD   (1<<2)
+ 
+ /* EFLAGS bit definitions. */
+@@ -393,6 +396,7 @@ typedef union {
+ #define EXC_OF  4
+ #define EXC_BR  5
+ #define EXC_UD  6
++#define EXC_NM  7
+ #define EXC_TS 10
+ #define EXC_NP 11
+ #define EXC_SS 12
+@@ -674,10 +678,45 @@ static void fpu_handle_exception(void *_
+     regs->eip += fic->insn_bytes;
+ }
+ 
++static int _get_fpu(
++    enum x86_emulate_fpu_type type,
++    struct fpu_insn_ctxt *fic,
++    struct x86_emulate_ctxt *ctxt,
++    const struct x86_emulate_ops *ops)
++{
++    int rc;
++
++    fic->exn_raised = 0;
++
++    fail_if(!ops->get_fpu);
++    rc = ops->get_fpu(fpu_handle_exception, fic, type, ctxt);
++
++    if ( rc == X86EMUL_OKAY )
++    {
++        unsigned long cr0;
++
++        fail_if(!ops->read_cr);
++        rc = ops->read_cr(0, &cr0, ctxt);
++        if ( rc != X86EMUL_OKAY )
++            return rc;
++        if ( cr0 & CR0_EM )
++        {
++            generate_exception_if(type == X86EMUL_FPU_fpu, EXC_NM, -1);
++            generate_exception_if(type == X86EMUL_FPU_mmx, EXC_UD, -1);
++            generate_exception_if(type == X86EMUL_FPU_xmm, EXC_UD, -1);
++        }
++        generate_exception_if((cr0 & CR0_TS) &&
++                              (type != X86EMUL_FPU_wait || (cr0 & CR0_MP)),
++                              EXC_NM, -1);
++    }
++
++ done:
++    return rc;
++}
++
+ #define get_fpu(_type, _fic)                                    \
+-do{ (_fic)->exn_raised = 0;                                     \
+-    fail_if(ops->get_fpu == NULL);                              \
+-    rc = ops->get_fpu(fpu_handle_exception, _fic, _type, ctxt); \
++do {                                                            \
++    rc = _get_fpu(_type, _fic, ctxt, ops);                      \
+     if ( rc ) goto done;                                        \
+ } while (0)
+ #define _put_fpu()                                              \
+@@ -2508,8 +2547,14 @@ x86_emulate(
+     }
+ 
+     case 0x9b:  /* wait/fwait */
+-        emulate_fpu_insn("fwait");
++    {
++        struct fpu_insn_ctxt fic = { .insn_bytes = 1 };
++
++        get_fpu(X86EMUL_FPU_wait, &fic);
++        asm volatile ( "fwait" ::: "memory" );
++        put_fpu(&fic);
+         break;
++    }
+ 
+     case 0x9c: /* pushf */
+         src.val = _regs.eflags;
+--- a/xen/arch/x86/x86_emulate/x86_emulate.h
++++ b/xen/arch/x86/x86_emulate/x86_emulate.h
+@@ -115,6 +115,7 @@ struct __packed segment_register {
+ /* FPU sub-types which may be requested via ->get_fpu(). */
+ enum x86_emulate_fpu_type {
+     X86EMUL_FPU_fpu, /* Standard FPU coprocessor instruction set */
++    X86EMUL_FPU_wait, /* WAIT/FWAIT instruction */
+     X86EMUL_FPU_mmx, /* MMX instruction set (%mm0-%mm7) */
+     X86EMUL_FPU_xmm, /* SSE instruction set (%xmm0-%xmm7/15) */
+     X86EMUL_FPU_ymm  /* AVX/XOP instruction set (%ymm0-%ymm7/15) */
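
For readers following the hunk above: the exception rules _get_fpu() now
enforces come straight from the SDM/APM. CR0.EM yields #NM for x87 and #UD
for MMX/SSE; CR0.TS yields #NM for any FPU access, except that WAIT/FWAIT
only faults when CR0.MP is also set. A minimal standalone sketch of that
decision table (hypothetical names, not the hypervisor's code):

    /* Sketch of the CR0-based FPU access checks; illustrative only. */
    #include <stdio.h>

    enum fpu_kind { FPU_X87, FPU_WAIT, FPU_MMX, FPU_XMM };
    enum { CR0_MP = 1 << 1, CR0_EM = 1 << 2, CR0_TS = 1 << 3 };
    enum { EXC_NONE = 0, EXC_UD = 6, EXC_NM = 7 };

    static int fpu_access_exception(enum fpu_kind type, unsigned long cr0)
    {
        if (cr0 & CR0_EM) {
            if (type == FPU_X87)
                return EXC_NM;
            if (type == FPU_MMX || type == FPU_XMM)
                return EXC_UD;
        }
        /* TS faults everything; FWAIT only faults if MP is also set. */
        if ((cr0 & CR0_TS) && (type != FPU_WAIT || (cr0 & CR0_MP)))
            return EXC_NM;
        return EXC_NONE;
    }

    int main(void)
    {
        printf("%d\n", fpu_access_exception(FPU_WAIT, CR0_TS));          /* 0 */
        printf("%d\n", fpu_access_exception(FPU_WAIT, CR0_TS | CR0_MP)); /* 7 */
        return 0;
    }
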
diff --git a/main/xen/xsa191-4.6-CVE-2016-9386.patch b/main/xen/xsa191-4.6-CVE-2016-9386.patch
new file mode 100644
index 0000000..d661d0c
--- /dev/null
+++ b/main/xen/xsa191-4.6-CVE-2016-9386.patch
@@ -0,0 +1,138 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/hvm: Fix the handling of non-present segments
+
+In 32bit, the data segments may be NULL to indicate that the segment is
+ineligible for use.  In both 32bit and 64bit, the LDT selector may be NULL to
+indicate that the entire LDT is ineligible for use.  However, nothing in Xen
+actually checks for this condition when performing other segmentation
+checks.  (Note however that limit and writeability checks are correctly
+performed).
+
+Neither Intel nor AMD specify the exact behaviour of loading a NULL segment.
+Experimentally, AMD zeroes all attributes but leaves the base and limit
+unmodified.  Intel zeroes the base, sets the limit to 0xfffffff and resets the
+attributes to just .G and .D/B.
+
+The use of the segment information in the VMCB/VMCS is equivalent to a native
+pipeline interacting with the segment cache.  The present bit can therefore
+have a subtly different meaning, and it is now cooked to uniformly indicate
+whether the segment is usable or not.
+
+GDTR and IDTR don't have access rights like the other segments, but for
+consistency, they are treated as being present so no special casing is needed
+elsewhere in the segmentation logic.
+
+AMD hardware does not consider the present bit for %cs and %tr, and will
+function as if they were present.  They are therefore unconditionally set to
+present when reading information from the VMCB, to maintain the new meaning of
+usability.
+
+Intel hardware has a separate unusable bit in the VMCS segment attributes.
+This bit is inverted and stored in the present field, so the hvm code can work
+with architecturally-common state.
+
+This is XSA-191.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -3666,6 +3666,10 @@ int hvm_virtual_to_linear_addr(
+          * COMPATIBILITY MODE: Apply segment checks and add base.
+          */
+ 
++        /* Segment not valid for use (cooked meaning of .p)? */
++        if ( !reg->attr.fields.p )
++            return 0;
++
+         switch ( access_type )
+         {
+         case hvm_access_read:
+@@ -3871,6 +3875,10 @@ static int hvm_load_segment_selector(
+     hvm_get_segment_register(
+         v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
+ 
++    /* Segment not valid for use (cooked meaning of .p)? */
++    if ( !desctab.attr.fields.p )
++        goto fail;
++
+     /* Check against descriptor table limit. */
+     if ( ((sel & 0xfff8) + 7) > desctab.limit )
+         goto fail;
+--- a/xen/arch/x86/hvm/svm/svm.c
++++ b/xen/arch/x86/hvm/svm/svm.c
+@@ -620,6 +620,7 @@ static void svm_get_segment_register(str
+     {
+     case x86_seg_cs:
+         memcpy(reg, &vmcb->cs, sizeof(*reg));
++        reg->attr.fields.p = 1;
+         reg->attr.fields.g = reg->limit > 0xFFFFF;
+         break;
+     case x86_seg_ds:
+@@ -653,13 +654,16 @@
+     case x86_seg_tr:
+         svm_sync_vmcb(v);
+         memcpy(reg, &vmcb->tr, sizeof(*reg));
++        reg->attr.fields.p = 1;
+         reg->attr.fields.type |= 0x2;
+         break;
+     case x86_seg_gdtr:
+         memcpy(reg, &vmcb->gdtr, sizeof(*reg));
++        reg->attr.bytes = 0x80;
+         break;
+     case x86_seg_idtr:
+         memcpy(reg, &vmcb->idtr, sizeof(*reg));
++        reg->attr.bytes = 0x80;
+         break;
+     case x86_seg_ldtr:
+         svm_sync_vmcb(v);
+--- a/xen/arch/x86/hvm/vmx/vmx.c
++++ b/xen/arch/x86/hvm/vmx/vmx.c
+@@ -867,10 +867,12 @@ void vmx_get_segment_register(struct vcp
+     reg->sel = sel;
+     reg->limit = limit;
+ 
+-    reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
+-    /* Unusable flag is folded into Present flag. */
+-    if ( attr & (1u<<16) )
+-        reg->attr.fields.p = 0;
++    /*
++     * Fold VT-x representation into Xen's representation.  The Present bit is
++     * unconditionally set to the inverse of unusable.
++     */
++    reg->attr.bytes =
++        (!(attr & (1u << 16)) << 7) | (attr & 0x7f) | ((attr >> 4) & 0xf00);
+ 
+     /* Adjust for virtual 8086 mode */
+     if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr 
+@@ -950,11 +952,11 @@ static void vmx_set_segment_register(str
+         }
+     }
+ 
+-    attr = ((attr & 0xf00) << 4) | (attr & 0xff);
+-
+-    /* Not-present must mean unusable. */
+-    if ( !reg->attr.fields.p )
+-        attr |= (1u << 16);
++    /*
++     * Unfold Xen representation into VT-x representation.  The unusable bit
++     * is unconditionally set to the inverse of present.
++     */
++    attr = (!(attr & (1u << 7)) << 16) | ((attr & 0xf00) << 4) | (attr & 0xff);
+ 
+     /* VMX has strict consistency requirement for flag G. */
+     attr |= !!(limit >> 20) << 15;
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1209,6 +1209,10 @@ protmode_load_seg(
+                                  &desctab, ctxt)) )
+         return rc;
+ 
++    /* Segment not valid for use (cooked meaning of .p)? */
++    if ( !desctab.attr.fields.p )
++        goto raise_exn;
++
+     /* Check against descriptor table limit. */
+     if ( ((sel & 0xfff8) + 7) > desctab.limit )
+         goto raise_exn;
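
The key convention in this patch: the segment attribute .p bit is "cooked"
to mean "usable", so any consumer of a segment register must test it before
the usual limit and type checks. A short sketch of that check order
(illustrative struct layout, not Xen's):

    #include <stdbool.h>
    #include <stdint.h>

    struct seg_reg {
        uint64_t base;
        uint32_t limit;
        struct { unsigned int type:4, s:1, dpl:2, p:1; } attr;
    };

    /* Returns false, touching nothing, if the segment is unusable. */
    static bool seg_access_ok(const struct seg_reg *seg, uint32_t off,
                              uint32_t bytes)
    {
        if (!seg->attr.p)               /* cooked .p: NULL/unusable */
            return false;
        return off <= seg->limit && bytes - 1 <= seg->limit - off;
    }
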
diff --git a/main/xen/xsa192-CVE-2016-9382.patch b/main/xen/xsa192-CVE-2016-9382.patch
new file mode 100644
index 0000000..b573a13
--- /dev/null
+++ b/main/xen/xsa192-CVE-2016-9382.patch
@@ -0,0 +1,64 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/HVM: don't load LDTR with VM86 mode attrs during task switch
+
+Just like TR, LDTR is purely a protected mode facility and hence needs
+to be loaded accordingly. Also move its loading to where it
+architecturally belongs.
+
+This is XSA-192.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -2728,17 +2728,16 @@ static void hvm_unmap_entry(void *p)
+ }
+ 
+ static int hvm_load_segment_selector(
+-    enum x86_segment seg, uint16_t sel)
++    enum x86_segment seg, uint16_t sel, unsigned int eflags)
+ {
+     struct segment_register desctab, cs, segr;
+     struct desc_struct *pdesc, desc;
+     u8 dpl, rpl, cpl;
+     bool_t writable;
+     int fault_type = TRAP_invalid_tss;
+-    struct cpu_user_regs *regs = guest_cpu_user_regs();
+     struct vcpu *v = current;
+ 
+-    if ( regs->eflags & X86_EFLAGS_VM )
++    if ( eflags & X86_EFLAGS_VM )
+     {
+         segr.sel = sel;
+         segr.base = (uint32_t)sel << 4;
+@@ -2986,6 +2985,8 @@ void hvm_task_switch(
+     if ( rc != HVMCOPY_okay )
+         goto out;
+ 
++    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, 0) )
++        goto out;
+ 
+     if ( hvm_set_cr3(tss.cr3, 1) )
+         goto out;
+@@ -3008,13 +3009,12 @@
+     }
+ 
+     exn_raised = 0;
+-    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt) ||
+-         hvm_load_segment_selector(x86_seg_es, tss.es) ||
+-         hvm_load_segment_selector(x86_seg_cs, tss.cs) ||
+-         hvm_load_segment_selector(x86_seg_ss, tss.ss) ||
+-         hvm_load_segment_selector(x86_seg_ds, tss.ds) ||
+-         hvm_load_segment_selector(x86_seg_fs, tss.fs) ||
+-         hvm_load_segment_selector(x86_seg_gs, tss.gs) )
++    if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_cs, tss.cs, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_ss, tss.ss, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_ds, tss.ds, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_fs, tss.fs, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_gs, tss.gs, tss.eflags) )
+         exn_raised = 1;
+ 
+     rc = hvm_copy_to_guest_virt(
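
The structural point of the change: LDTR is loaded first, with eflags forced
to 0 so the VM86 path can never be taken, and only then are the data
segments loaded under the incoming tss.eflags. A compile-time sketch of
that ordering (stubbed helper, hypothetical names):

    #include <stdint.h>

    enum seg { SEG_LDTR, SEG_ES, SEG_CS, SEG_SS, SEG_DS, SEG_FS, SEG_GS };
    struct tss { uint16_t ldt, es, cs, ss, ds, fs, gs; uint32_t eflags; };

    /* Stub: returns 0 on success; the real thing does descriptor checks. */
    static int load_seg(enum seg s, uint16_t sel, uint32_t eflags)
    { (void)s; (void)sel; (void)eflags; return 0; }

    static int task_switch_segments(const struct tss *tss)
    {
        if (load_seg(SEG_LDTR, tss->ldt, 0)) /* protected mode, always */
            return -1;
        /* Data segments honour the new task's EFLAGS.VM. */
        return load_seg(SEG_ES, tss->es, tss->eflags) ||
               load_seg(SEG_CS, tss->cs, tss->eflags) ||
               load_seg(SEG_SS, tss->ss, tss->eflags) ||
               load_seg(SEG_DS, tss->ds, tss->eflags) ||
               load_seg(SEG_FS, tss->fs, tss->eflags) ||
               load_seg(SEG_GS, tss->gs, tss->eflags);
    }
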
diff --git a/main/xen/xsa193-4.7-CVE-2016-9385.patch b/main/xen/xsa193-4.7-CVE-2016-9385.patch
new file mode 100644
index 0000000..c5486ef
--- /dev/null
+++ b/main/xen/xsa193-4.7-CVE-2016-9385.patch
@@ -0,0 +1,68 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/PV: writes of %fs and %gs base MSRs require canonical addresses
+
+Commit c42494acb2 ("x86: fix FS/GS base handling when using the
+fsgsbase feature") replaced the use of wrmsr_safe() on these paths
+without recognizing that wr{f,g}sbase() use just wrmsrl() and that the
+WR{F,G}SBASE instructions also raise #GP for non-canonical input.
+
+Similarly arch_set_info_guest() needs to prevent non-canonical
+addresses from getting stored into state later to be loaded by context
+switch code. For consistency also check stack pointers and LDT base.
+DR0..3, otoh, already get properly checked in set_debugreg() (albeit
+we discard the error there).
+
+The SHADOW_GS_BASE check isn't strictly necessary, but I think we
+better avoid trying the WRMSR if we know it's going to fail.
+
+This is XSA-193.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -890,7 +890,13 @@ int arch_set_info_guest(
+     {
+         if ( !compat )
+         {
+-            if ( !is_canonical_address(c.nat->user_regs.eip) ||
++            if ( !is_canonical_address(c.nat->user_regs.rip) ||
++                 !is_canonical_address(c.nat->user_regs.rsp) ||
++                 !is_canonical_address(c.nat->kernel_sp) ||
++                 (c.nat->ldt_ents && !is_canonical_address(c.nat->ldt_base)) ||
++                 !is_canonical_address(c.nat->fs_base) ||
++                 !is_canonical_address(c.nat->gs_base_kernel) ||
++                 !is_canonical_address(c.nat->gs_base_user) ||
+                  !is_canonical_address(c.nat->event_callback_eip) ||
+                  !is_canonical_address(c.nat->syscall_callback_eip) ||
+                  !is_canonical_address(c.nat->failsafe_callback_eip) )
+--- a/xen/arch/x86/traps.c
++++ b/xen/arch/x86/traps.c
+@@ -2723,19 +2723,22 @@ static int emulate_privileged_op(struct
+         switch ( regs->_ecx )
+         {
+         case MSR_FS_BASE:
+-            if ( is_pv_32bit_domain(currd) )
++            if ( is_pv_32bit_domain(currd) ||
++                 !is_canonical_address(msr_content) )
+                 goto fail;
+             wrfsbase(msr_content);
+             v->arch.pv_vcpu.fs_base = msr_content;
+             break;
+         case MSR_GS_BASE:
+-            if ( is_pv_32bit_domain(currd) )
++            if ( is_pv_32bit_domain(currd) ||
++                 !is_canonical_address(msr_content) )
+                 goto fail;
+             wrgsbase(msr_content);
+             v->arch.pv_vcpu.gs_base_kernel = msr_content;
+             break;
+         case MSR_SHADOW_GS_BASE:
+-            if ( is_pv_32bit_domain(currd) )
++            if ( is_pv_32bit_domain(currd) ||
++                 !is_canonical_address(msr_content) )
+                 goto fail;
+             if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) )
+                 goto fail;
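
The canonical-address predicate used throughout the fix is simple: with
4-level paging, bits 63:48 must be a sign-extension of bit 47. A sketch
(assuming the usual 48-bit virtual address width; not Xen's exact macro):

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_canonical_address(uint64_t addr)
    {
        /* Shift out the top 16 bits, sign-extend back, compare. */
        return (int64_t)(addr << 16) >> 16 == (int64_t)addr;
    }

    /* is_canonical_address(0x00007fffffffffffULL) -> true
     * is_canonical_address(0x0000800000000000ULL) -> false (would #GP) */
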
diff --git a/main/xen/xsa195-CVE-2016-9383.patch b/main/xen/xsa195-CVE-2016-9383.patch
new file mode 100644
index 0000000..a193a5c
--- /dev/null
+++ b/main/xen/xsa195-CVE-2016-9383.patch
@@ -0,0 +1,45 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86emul: fix huge bit offset handling
+
+We must never chop off the high 32 bits.
+
+This is XSA-195.
+
+Reported-by: George Dunlap <george.dunlap@citrix.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -2549,6 +2549,12 @@ x86_emulate(
+         else
+         {
+             /*
++             * Instructions such as bt can reference an arbitrary offset from
++             * their memory operand, but the instruction doing the actual
++             * emulation needs the appropriate op_bytes read from memory.
++             * Adjust both the source register and memory operand to make an
++             * equivalent instruction.
++             *
+              * EA       += BitOffset DIV op_bytes*8
+              * BitOffset = BitOffset MOD op_bytes*8
+              * DIV truncates towards negative infinity.
+@@ -2560,14 +2566,15 @@ x86_emulate(
+                 src.val = (int32_t)src.val;
+             if ( (long)src.val < 0 )
+             {
+-                unsigned long byte_offset;
+-                byte_offset = op_bytes + (((-src.val-1) >> 3) & ~(op_bytes-1));
++                unsigned long byte_offset =
++                    op_bytes + (((-src.val - 1) >> 3) & ~(op_bytes - 1L));
++
+                 ea.mem.off -= byte_offset;
+                 src.val = (byte_offset << 3) + src.val;
+             }
+             else
+             {
+-                ea.mem.off += (src.val >> 3) & ~(op_bytes - 1);
++                ea.mem.off += (src.val >> 3) & ~(op_bytes - 1L);
+                 src.val &= (op_bytes << 3) - 1;
+             }
+         }
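
A worked instance of the arithmetic above, and of why the 1L matters: with
a 32-bit op_bytes, ~(op_bytes - 1) is a 32-bit mask that zero-extends and
chops the high bits off a 64-bit displacement. Standalone sketch (not
emulator code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int op_bytes = 8;           /* bt with 64-bit operands */
        long bitoff = 1L << 40;              /* huge positive bit offset */
        unsigned long ea = 0x1000;

        ea += ((unsigned long)bitoff >> 3) & ~(op_bytes - 1L); /* fixed */
        bitoff &= (op_bytes << 3) - 1;

        printf("ea = %#lx, bitoff = %ld\n", ea, bitoff);
        /* ea = 0x2000001000, bitoff = 0; with ~(op_bytes - 1) instead,
         * the displacement would have been masked to its low 32 bits. */
        return 0;
    }
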
diff --git a/main/xen/xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch b/main/xen/xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
new file mode 100644
index 0000000..7193e9a
--- /dev/null
+++ b/main/xen/xsa196-0001-x86-emul-Correct-the-IDT-entry-calculation-in-inject-CVE-2016-9377.patch
@@ -0,0 +1,61 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/emul: Correct the IDT entry calculation in inject_swint()
+
+The logic, as introduced in c/s 36ebf14ebe "x86/emulate: support for emulating
+software event injection" is buggy.  The size of an IDT entry depends on long
+mode being active, not the width of the code segment currently in use.
+
+In particular, this means that a compatibility code segment which hits
+emulation for software event injection will end up using an incorrect offset
+in the IDT for DPL/Presence checking.  In practice, this only occurs on old
+AMD hardware lacking NRip support; all newer AMD hardware, and all Intel
+hardware bypass this path in the emulator.
+
+While here, fix a minor issue with reading the IDT entry.  The return value
+from ops->read() wasn't checked, but in reality the only failure case is if a
+pagefault occurs.  This is not a realistic problem as the kernel will almost
+certainly crash with a double fault if this setup actually occurred.
+
+This is part of XSA-196.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+ xen/arch/x86/x86_emulate/x86_emulate.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
+index 7a707dc..f74aa8f 100644
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1630,10 +1630,16 @@ static int inject_swint(enum x86_swint_type type,
+     {
+         if ( !in_realmode(ctxt, ops) )
+         {
+-            unsigned int idte_size = (ctxt->addr_size == 64) ? 16 : 8;
+-            unsigned int idte_offset = vector * idte_size;
++            unsigned int idte_size, idte_offset;
+             struct segment_register idtr;
+             uint32_t idte_ctl;
++            int lm = in_longmode(ctxt, ops);
++
++            if ( lm < 0 )
++                return X86EMUL_UNHANDLEABLE;
++
++            idte_size = lm ? 16 : 8;
++            idte_offset = vector * idte_size;
+ 
+             /* icebp sets the External Event bit despite being an instruction. */
+             error_code = (vector << 3) | ECODE_IDT |
+@@ -1661,8 +1667,9 @@
+              * Should strictly speaking read all 8/16 bytes of an entry,
+              * but we currently only care about the dpl and present bits.
+              */
+-            ops->read(x86_seg_none, idtr.base + idte_offset + 4,
+-                      &idte_ctl, sizeof(idte_ctl), ctxt);
++            if ( (rc = ops->read(x86_seg_none, idtr.base + idte_offset + 4,
++                                 &idte_ctl, sizeof(idte_ctl), ctxt)) )
++                goto done;
+ 
+             /* Is this entry present? */
+             if ( !(idte_ctl & (1u << 15)) )
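
The sizing rule the first hunk restores is worth spelling out: gate
descriptors in the IDT are 8 bytes outside long mode and 16 bytes in long
mode, independent of the width of the current code segment. A tiny sketch
(hypothetical helper):

    #include <stdio.h>

    static unsigned int idte_offset(unsigned int vector, int long_mode)
    {
        unsigned int idte_size = long_mode ? 16 : 8;
        return vector * idte_size;
    }

    int main(void)
    {
        /* A compatibility-mode guest (32-bit CS, long mode active) must
         * still index 16-byte entries: */
        printf("%u\n", idte_offset(0x80, /* long_mode = */ 1)); /* 2048 */
        return 0;
    }
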
diff --git a/main/xen/xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch b/main/xen/xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
new file mode 100644
index 0000000..26580ff
--- /dev/null
+++ b/main/xen/xsa196-0002-x86-svm-Fix-injection-of-software-interrupts-CVE-2016-9378.patch
@@ -0,0 +1,76 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/svm: Fix injection of software interrupts
+
+The non-NextRip logic in c/s 36ebf14eb "x86/emulate: support for emulating
+software event injection" was based on an older version of the AMD software
+manual.  The manual was later corrected, following findings from that series.
+
+I took the original wording of "not supported without NextRIP" to mean that
+X86_EVENTTYPE_SW_INTERRUPT was not eligible for use.  It turns out that this
+is not the case, and the new wording is clearer on the matter.
+
+Despite testing the original patch series on non-NRip hardware, the
+swint-emulation XTF test case focuses on the debug vectors; it never ended up
+executing an `int $n` instruction for a vector which wasn't also an exception.
+
+During a vmentry, the use of X86_EVENTTYPE_HW_EXCEPTION comes with a vector
+check to ensure that it is only used with exception vectors.  Xen's use of
+X86_EVENTTYPE_HW_EXCEPTION for `int $n` injection has always been buggy on AMD
+hardware.
+
+Fix this by always using X86_EVENTTYPE_SW_INTERRUPT.
+
+Print and decode the eventinj information in svm_vmcb_dump(), as it has
+several invalid combinations which cause vmentry failures.
+
+This is part of XSA-196.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+ xen/arch/x86/hvm/svm/svm.c      | 13 +++++--------
+ xen/arch/x86/hvm/svm/svmdebug.c |  4 ++++
+ 2 files changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
+index 4391744..76efc3e 100644
+--- a/xen/arch/x86/hvm/svm/svm.c
++++ b/xen/arch/x86/hvm/svm/svm.c
+@@ -1231,17 +1231,14 @@ static void svm_inject_trap(const struct hvm_trap *trap)
+     {
+     case X86_EVENTTYPE_SW_INTERRUPT: /* int $n */
+         /*
+-         * Injection type 4 (software interrupt) is only supported with
+-         * NextRIP support.  Without NextRIP, the emulator will have performed
+-         * DPL and presence checks for us.
++         * Software interrupts (type 4) cannot be properly injected if the
++         * processor doesn't support NextRIP.  Without NextRIP, the emulator
++         * will have performed DPL and presence checks for us, and will have
++         * moved eip forward if appropriate.
+          */
+         if ( cpu_has_svm_nrips )
+-        {
+             vmcb->nextrip = regs->eip + _trap.insn_len;
+-            event.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
+-        }
+-        else
+-            event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
++        event.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
+         break;
+ 
+     case X86_EVENTTYPE_PRI_SW_EXCEPTION: /* icebp */
+diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
+index ded5d19..f93dfed 100644
+--- a/xen/arch/x86/hvm/svm/svmdebug.c
++++ b/xen/arch/x86/hvm/svm/svmdebug.c
+@@ -48,6 +48,10 @@ void svm_vmcb_dump(const char *from, struct vmcb_struct *vmcb)
+            vmcb->tlb_control,
+            (unsigned long long)vmcb->_vintr.bytes,
+            (unsigned long long)vmcb->interrupt_shadow);
++    printk("eventinj %016"PRIx64", valid? %d, ec? %d, type %u, vector %#x\n",
++           vmcb->eventinj.bytes, vmcb->eventinj.fields.v,
++           vmcb->eventinj.fields.ev, vmcb->eventinj.fields.type,
++           vmcb->eventinj.fields.vector);
+     printk("exitcode = %#Lx exitintinfo = %#Lx\n",
+            (unsigned long long)vmcb->exitcode,
+            (unsigned long long)vmcb->exitintinfo.bytes);
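
Since the svm_vmcb_dump() hunk prints raw eventinj state, a sketch of the
field layout being decoded may help (widths per the AMD APM; C bitfield
order assumed low-to-high as on the usual x86 ABIs, names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    union eventinj {
        uint64_t bytes;
        struct {
            uint64_t vector:8;     /* interrupt/exception vector      */
            uint64_t type:3;       /* 4 = software interrupt (int $n) */
            uint64_t ev:1;         /* error code pushed               */
            uint64_t resvd:19;
            uint64_t v:1;          /* injection valid                 */
            uint64_t errorcode:32;
        } fields;
    };

    int main(void)
    {
        union eventinj e = { .bytes = 0x80000480ULL }; /* v=1, type=4, 0x80 */
        printf("valid? %u type %u vector %#x\n",
               (unsigned)e.fields.v, (unsigned)e.fields.type,
               (unsigned)e.fields.vector);
        return 0;
    }
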
diff --git a/main/xen/xsa197-4.6-qemuu-CVE-2016-9381.patch b/main/xen/xsa197-4.6-qemuu-CVE-2016-9381.patch
new file mode 100644
index 0000000..e59a965
--- /dev/null
+++ b/main/xen/xsa197-4.6-qemuu-CVE-2016-9381.patch
@@ -0,0 +1,63 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: xen: fix ioreq handling
+
+Avoid double fetches and bounds check size to avoid overflowing
+internal variables.
+
+This is XSA-197.
+
+Reported-by: yanghongke <yanghongke@huawei.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+
+--- a/tools/qemu-xen/xen-hvm.c
++++ b/tools/qemu-xen/xen-hvm.c
+@@ -817,6 +817,10 @@ static void cpu_ioreq_pio(ioreq_t *req)
+ {
+     uint32_t i;
+ 
++    if (req->size > sizeof(uint32_t)) {
++        hw_error("PIO: bad size (%u)", req->size);
++    }
++
+     if (req->dir == IOREQ_READ) {
+         if (!req->data_is_ptr) {
+             req->data = do_inp(req->addr, req->size);
+@@ -846,6 +850,10 @@ static void cpu_ioreq_move(ioreq_t *req)
+ {
+     uint32_t i;
+ 
++    if (req->size > sizeof(req->data)) {
++        hw_error("MMIO: bad size (%u)", req->size);
++    }
++
+     if (!req->data_is_ptr) {
+         if (req->dir == IOREQ_READ) {
+             for (i = 0; i < req->count; i++) {
+@@ -999,11 +1007,13 @@ static int handle_buffered_iopage(XenIOS
+         req.df = 1;
+         req.type = buf_req->type;
+         req.data_is_ptr = 0;
++        xen_rmb();
+         qw = (req.size == 8);
+         if (qw) {
+             buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
+                                            IOREQ_BUFFER_SLOT_NUM];
+             req.data |= ((uint64_t)buf_req->data) << 32;
++            xen_rmb();
+         }
+ 
+         handle_ioreq(state, &req);
+@@ -1034,7 +1044,11 @@ static void cpu_handle_ioreq(void *opaqu
+ 
+     handle_buffered_iopage(state);
+     if (req) {
+-        handle_ioreq(state, req);
++        ioreq_t copy = *req;
++
++        xen_rmb();
++        handle_ioreq(state, &copy);
++        req->data = copy.data;
+ 
+         if (req->state != STATE_IOREQ_INPROCESS) {
+             fprintf(stderr, "Badness in I/O request ... not in service?!: "
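
Both halves of XSA-197 apply the same pattern: snapshot the request out of
the guest-writable shared page once, fence, then emulate using only the
private copy and write back nothing but the data field. A condensed sketch
of the idiom (a compiler barrier standing in for xen_rmb(); not qemu's
code):

    struct ioreq { unsigned int size, dir; unsigned long addr, data; };

    #define barrier() __asm__ __volatile__("" ::: "memory")

    void handle(volatile struct ioreq *shared)
    {
        struct ioreq copy = { shared->size, shared->dir,
                              shared->addr, shared->data };
        barrier();                        /* no re-reads of *shared below */

        if (copy.size > sizeof(copy.data))
            return;                       /* bounds check the stable copy */
        /* ... emulate strictly from `copy` ... */
        shared->data = copy.data;         /* publish only the result */
    }
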
diff --git a/main/xen/xsa197-qemut-CVE-2016-9381.patch b/main/xen/xsa197-qemut-CVE-2016-9381.patch
new file mode 100644
index 0000000..3f55bec
--- /dev/null
+++ b/main/xen/xsa197-qemut-CVE-2016-9381.patch
@@ -0,0 +1,65 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: xen: fix ioreq handling
+
+Avoid double fetches and bounds check size to avoid overflowing
+internal variables.
+
+This is XSA-197.
+
+Reported-by: yanghongke <yanghongke@huawei.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Ian Jackson <ian.jackson@eu.citrix.com>
+
+--- a/tools/qemu-xen-traditional/i386-dm/helper2.c
++++ b/tools/qemu-xen-traditional/i386-dm/helper2.c
+@@ -375,6 +375,11 @@ static void cpu_ioreq_pio(CPUState *env,
+ {
+     uint32_t i;
+ 
++    if (req->size > sizeof(unsigned long)) {
++        fprintf(stderr, "PIO: bad size (%u)\n", req->size);
++        exit(-1);
++    }
++
+     if (req->dir == IOREQ_READ) {
+         if (!req->data_is_ptr) {
+             req->data = do_inp(env, req->addr, req->size);
+@@ -404,6 +409,11 @@ static void cpu_ioreq_move(CPUState *env
+ {
+     uint32_t i;
+ 
++    if (req->size > sizeof(req->data)) {
++        fprintf(stderr, "MMIO: bad size (%u)\n", req->size);
++        exit(-1);
++    }
++
+     if (!req->data_is_ptr) {
+         if (req->dir == IOREQ_READ) {
+             for (i = 0; i < req->count; i++) {
+@@ -516,11 +526,13 @@ static int __handle_buffered_iopage(CPUS
+         req.df = 1;
+         req.type = buf_req->type;
+         req.data_is_ptr = 0;
++        xen_rmb();
+         qw = (req.size == 8);
+         if (qw) {
+             buf_req = &buffered_io_page->buf_ioreq[(rdptr + 1) %
+                                                    IOREQ_BUFFER_SLOT_NUM];
+             req.data |= ((uint64_t)buf_req->data) << 32;
++            xen_rmb();
+         }
+ 
+         __handle_ioreq(env, &req);
+@@ -552,7 +564,11 @@ static void cpu_handle_ioreq(void *opaqu
+ 
+     __handle_buffered_iopage(env);
+     if (req) {
+-        __handle_ioreq(env, req);
++        ioreq_t copy = *req;
++
++        xen_rmb();
++        __handle_ioreq(env, &copy);
++        req->data = copy.data;
+ 
+         if (req->state != STATE_IOREQ_INPROCESS) {
+             fprintf(logfile, "Badness in I/O request ... not in service?!: "
diff --git a/main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch b/main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch
new file mode 100644
index 0000000..dbf7084
--- /dev/null
+++ b/main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch
@@ -0,0 +1,62 @@
+From 71a389ae940bc52bf897a6e5becd73fd8ede94c5 Mon Sep 17 00:00:00 2001
+From: Ian Jackson <ian.jackson@eu.citrix.com>
+Date: Thu, 3 Nov 2016 16:37:40 +0000
+Subject: [PATCH] pygrub: Properly quote results, when returning them to the
+ caller:
+
+* When the caller wants sexpr output, use `repr()'
+  This is what Xend expects.
+
+  The returned S-expressions are now escaped and quoted by Python,
+  generally using '...'.  Previously kernel and ramdisk were unquoted
+  and args was quoted with "..." but without proper escaping.  This
+  change may break toolstacks which do not properly dequote the
+  returned S-expressions.
+
+* When the caller wants "simple" output, crash if the delimiter is
+  contained in the returned value.
+
+  With --output-format=simple it does not seem like this could ever
+  happen, because the bootloader config parsers all take line-based
+  input from the various bootloader config files.
+
+  With --output-format=simple0, this can happen if the bootloader
+  config file contains nul bytes.
+
+This is XSA-198.
+
+Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
+Tested-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+---
+ tools/pygrub/src/pygrub | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/tools/pygrub/src/pygrub b/tools/pygrub/src/pygrub
+index 40f9584..dd0c8f7 100755
+--- a/tools/pygrub/src/pygrub
++++ b/tools/pygrub/src/pygrub
+@@ -721,14 +721,17 @@ def sniff_netware(fs, cfg):
+     return cfg
+ 
+ def format_sxp(kernel, ramdisk, args):
+-    s = "linux (kernel %s)" % kernel
++    s = "linux (kernel %s)" % repr(kernel)
+     if ramdisk:
+-        s += "(ramdisk %s)" % ramdisk
++        s += "(ramdisk %s)" % repr(ramdisk)
+     if args:
+-        s += "(args \"%s\")" % args
++        s += "(args %s)" % repr(args)
+     return s
+                 
+ def format_simple(kernel, ramdisk, args, sep):
++    for check in (kernel, ramdisk, args):
++        if check is not None and sep in check:
++            raise RuntimeError, "simple format cannot represent delimiter-containing value"
+     s = ("kernel %s" % kernel) + sep
+     if ramdisk:
+         s += ("ramdisk %s" % ramdisk) + sep
+-- 
+2.1.4
+
-- 
2.6.6