
[alpine-aports] [PATCH v3.1] main/xen: security upgrade - fixes #6352, #6499

From: Sergey Lukin <sergej.lukin@gmail.com>
Date: Mon, 12 Dec 2016 11:14:51 +0000

CVE-2016-7777
CVE-2016-9386, CVE-2016-9382, CVE-2016-9385, CVE-2016-9383,
CVE-2016-9381, CVE-2016-9379, CVE-2016-9380
---
 main/xen/APKBUILD                                 |  47 ++++++-
 main/xen/xsa190-4.5-CVE-2016-7777.patch           | 163 ++++++++++++++++++++++
 main/xen/xsa191-4.6-CVE-2016-9386.patch           | 138 ++++++++++++++++++
 main/xen/xsa192-4.5-CVE-2016-9382.patch           |  63 +++++++++
 main/xen/xsa193-4.5-CVE-2016-9385.patch           |  65 +++++++++
 main/xen/xsa195-CVE-2016-9383.patch               |  45 ++++++
 main/xen/xsa197-4.4-qemuu-CVE-2016-9381.patch     |  63 +++++++++
 main/xen/xsa197-4.5-qemut-CVE-2016-9381.patch     |  65 +++++++++
 main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch |  62 ++++++++
 9 files changed, 704 insertions(+), 7 deletions(-)
 create mode 100644 main/xen/xsa190-4.5-CVE-2016-7777.patch
 create mode 100644 main/xen/xsa191-4.6-CVE-2016-9386.patch
 create mode 100644 main/xen/xsa192-4.5-CVE-2016-9382.patch
 create mode 100644 main/xen/xsa193-4.5-CVE-2016-9385.patch
 create mode 100644 main/xen/xsa195-CVE-2016-9383.patch
 create mode 100644 main/xen/xsa197-4.4-qemuu-CVE-2016-9381.patch
 create mode 100644 main/xen/xsa197-4.5-qemut-CVE-2016-9381.patch
 create mode 100644 main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index b0707d7..40b2991 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -1,9 +1,10 @@
 # Contributor: William Pitcock <nenolod@dereferenced.org>
 # Contributor: Roger Pau Monne <roger.pau@entel.upc.edu>
+# Contributor: Sergey Lukin <sergej.lukin@gmail.com>
 # Maintainer: William Pitcock <nenolod@dereferenced.org>
 pkgname=xen
 pkgver=4.4.4
-pkgrel=1
+pkgrel=2
 pkgdesc="Xen hypervisor"
 url="http://www.xen.org/"
 arch="x86_64"
@@ -29,6 +30,14 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
 	xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
 	xsa187-4.4-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
 	xsa188.patch
+	xsa190-4.5-CVE-2016-7777.patch
+	xsa191-4.6-CVE-2016-9386.patch
+	xsa192-4.5-CVE-2016-9382.patch
+	xsa193-4.5-CVE-2016-9385.patch
+	xsa195-CVE-2016-9383.patch
+	xsa197-4.4-qemuu-CVE-2016-9381.patch
+	xsa197-4.5-qemut-CVE-2016-9381.patch
+	xsa198-CVE-2016-9379-CVE-2016-9380.patch
 
 	0001-libxl-Record-backend-frontend-paths-in-libxl-DOMID.patch
 	0002-libxl-Provide-libxl__backendpath_parse_domid.patch
@@ -85,7 +94,7 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
 	xenqemu.initd
 	"
 
-_builddir="$srcdir"/$pkgname-$pkgver
+builddir="$srcdir"/$pkgname-$pkgver
 
 # security fixes:
 #   4.4.4-r0:
@@ -98,7 +107,7 @@ _builddir="$srcdir"/$pkgname-$pkgver
 
 prepare() {
 	local i
-	cd "$_builddir"
+	cd "$builddir"
 
 	for i in $source; do
 		case $i in
@@ -108,7 +117,7 @@ prepare() {
 
 	# install our stdint_local.h and elf_local.h
 	install "$srcdir"/stdint_local.h "$srcdir"/elf_local.h \
-		"$_builddir"/tools/firmware/ || return 1
+		"$builddir"/tools/firmware/ || return 1
 
 	# remove all -Werror
 	msg "Eradicating -Werror..."
@@ -141,7 +150,7 @@ munge_cflags() {
 # to invoke specific tasks like building the hypervisor.  i.e.
 #    $ abuild configure build_tools
 configure() {
-	cd "$_builddir"
+	cd "$builddir"
 
 	msg "Running configure..."
 	./configure --prefix=/usr \
@@ -179,7 +188,7 @@ build_stubdom() {
 }
 
 build() {
-	cd "$_builddir"
+	cd "$builddir"
 
 	configure || return 1
 	build_hypervisor || return 1
@@ -191,7 +200,7 @@ build() {
 }
 
 package() {
-	cd "$_builddir"
+	cd "$builddir"
 
 	unset CFLAGS
 	unset LDFLAGS
@@ -271,6 +280,14 @@ cc0904605d03a9e4f6f21d16824e41c9  xsa184-qemuu-master.patch
 c426383254acdcbb9466bbec2d6f8d9b  xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
 a7545557908b8e2580af85b7ec680e43  xsa187-4.4-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
 adc72106be77107b379a62aa61294519  xsa188.patch
+478b88d2ef7e67bc03d3637def41a485  xsa190-4.5-CVE-2016-7777.patch
+5399accd478266047e9fada57bba1bf8  xsa191-4.6-CVE-2016-9386.patch
+fa8512910a0dbe7f49b1800518f9c204  xsa192-4.5-CVE-2016-9382.patch
+93e1b2cd72ef9c449fa848f79f1c653a  xsa193-4.5-CVE-2016-9385.patch
+03ee88fdd719a6e2cdd53b698b14bfa0  xsa195-CVE-2016-9383.patch
+f6bcf6352072b9eeec8c15c27237d486  xsa197-4.4-qemuu-CVE-2016-9381.patch
+5e27d1f5d0e5e72613a73d7666fed8d5  xsa197-4.5-qemut-CVE-2016-9381.patch
+e8d3ee1e904071920a6afbbf6a27aad2  xsa198-CVE-2016-9379-CVE-2016-9380.patch
 d598b31823a2b4398fb2f9c39aec0cff  0001-libxl-Record-backend-frontend-paths-in-libxl-DOMID.patch
 761926907618592763ba75fce4c02c57  0002-libxl-Provide-libxl__backendpath_parse_domid.patch
 b5e297a4a111f897b403d70c25158adb  0003-libxl-Do-not-trust-frontend-in-libxl__devices_destro.patch
@@ -328,6 +345,14 @@ e61c52477a8d8aa79111d686b103202ff8a558d8b3356635288c1290789b7eb3  xsa176.patch
 be9fe85d36c2c1fbca246c1f4d834c3ef11b6ab3d5467da0ac8c079aa5a68de9  xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
 727b18ae83001f7ea04613aa7199ada3e6a84939aa44516f7c426e609d383b2a  xsa187-4.4-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
 9f374c2e1437ad71369f41275e7b333e7b7691a783ba693ee567c899bd78c722  xsa188.patch
+477d56c41cc2101432459ab79e4d5663aade779c36285f5c1d6d6ed4e34e1009  xsa190-4.5-CVE-2016-7777.patch
+d95a1f0dd5c45497ca56e2e1390fc688bf0a4a7a7fd10c65ae25b4bbb3353b69  xsa191-4.6-CVE-2016-9386.patch
+bb0c6622c6f5c5eb9a680020d865802069446830b4a170bcb82336f6c3b77f55  xsa192-4.5-CVE-2016-9382.patch
+b3494b1fe5fefc0d032bd603340e364c880ec0d3ae3fb8aa3a773038e956f955  xsa193-4.5-CVE-2016-9385.patch
+6ab5f13b81e3bbf6096020f4c3beeffaff67a075cab67e033ba27d199b41cec1  xsa195-CVE-2016-9383.patch
+c4cee04acde95a94a644dc6de9cbf1af91bd7931e492ebf1e076328eaa9b4888  xsa197-4.4-qemuu-CVE-2016-9381.patch
+d662353629117b9c978cf5444995b41e77b079cc665e078ae7868b715c47c382  xsa197-4.5-qemut-CVE-2016-9381.patch
+0e4533ad2157c03ab309bd12a54f5ff325f03edbe97f23c60a16a3f378c75eae  xsa198-CVE-2016-9379-CVE-2016-9380.patch
 cefe2c82a30227b6538c6924d7d939192be3c481e48ac94c82f4c51f60388570  0001-libxl-Record-backend-frontend-paths-in-libxl-DOMID.patch
 f24b26891fac4e8bf8a0939a5b64fc7ad096ef699f1882aad6e96cf81b85fc3e  0002-libxl-Provide-libxl__backendpath_parse_domid.patch
 748ea9d369b1f8372d1a4c420e6a9d90f881b7142e7913ed5d72b99c07ac11a0  0003-libxl-Do-not-trust-frontend-in-libxl__devices_destro.patch
@@ -385,6 +410,14 @@ c11965a710eb0ff7b3f7d4797882d6607e8091159b06267a190dc12e0292370a7682a8ec3b7036bb
 d85bc3c56805ff5b3df6b85b2b34ff97d15fe254fc5a873b5c43c2c15564eea42753723a6296292a543e7b7dc83ad71f0fafe01fa6a6ebf82fa0a7268fc67486  xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
 cb3bcaa104a1e1a45f1fcb90682c1496f008d603dc9ea63e9b815628f6d1b6d439d450f67d633db34a08368b9594fb5aca70aa6c77035b24ae8f09f69d2b56db  xsa187-4.4-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
 171182bf8fd2d546a58bdd695729b24b6e8d121d6236dfb4c50144ee7697ae43c2d30d01af0412a7a60caabd79126b8eb94029192b4619cfefeca883d73d7991  xsa188.patch
+23ca5b5c86186b3683be38b65ed30d0ddf15d9ae29e462ae9b2606d78d84ceafa3913a2648d0430872aef1e6209c443b81d8bd4ae4c01b9021c75db1ed05ba5a  xsa190-4.5-CVE-2016-7777.patch
+502f50bece05d52b127c497eda0236a9011e56885fb0b5fac74ab449c2eac94d0f2cf64da16808c25f8a3091aef0a9586ad5c19f6b98a8c459908149d629b321  xsa191-4.6-CVE-2016-9386.patch
+d158cd493ccc516814201cb147ad99688223437938e655c5c8d75c2b73e14c319dc94e6077a9ec6521f2ca5e6af5d118f923f333702a83266c0ba44cc18efa9e  xsa192-4.5-CVE-2016-9382.patch
+e9209ac29f8ed7362f9f83d1abc99a5e1b1b4edd83c652343aa23b4b3ac0c74e0f5e10593872022958930dd65cb8062d286fba9b77a167a12108687005fab0a4  xsa193-4.5-CVE-2016-9385.patch
+2b32a360c13590f24de8ebb1cd18eb17eada444034a394739c21306be708ba5924ea1448e0d120c0f61f9472bce45e80439e3fd5779f4be72d367ce5c55b6ec0  xsa195-CVE-2016-9383.patch
+7f2792b288ac934428b0221bac0fde17ad8af873231b4c46ababdadb3a87650dc3a4d5089455eb9c319d7fe29ea878ad2e43140e10bdd86062e8821260da9428  xsa197-4.4-qemuu-CVE-2016-9381.patch
+3d1cedf051cd43c56366579d87e5c53da28ba1b02c11f5624265e81ede8dd9650fd7a7ba4129c6a6262bfbbde7d0960c0b04e72975f52eefb733e12c9e69f02d  xsa197-4.5-qemut-CVE-2016-9381.patch
+b61429fbf4d1677a8dab2710ab21335f18b3f998f2e5e19e45a4727f71b9671b3d1bd709bef3594cbaa5a47f339c3b8a5cccf11dd361b993aa76d242b825549c  xsa198-CVE-2016-9379-CVE-2016-9380.patch
 a78d27cbd0090521854a10d83180d58b4050c1ab8d8a7eef4639240ea24df5a03d8638795589479010a5de1a4df3ce64f705afc8971786ad981a87a545778014  0001-libxl-Record-backend-frontend-paths-in-libxl-DOMID.patch
 c7142a142413542e291df407621c16b7d3c0a4b8cfb4c3287f2be012ea72485cb2b8d70cffc3cf4393f664f7154e9cbe4a436ead044ef820f7ab8eee7f445625  0002-libxl-Provide-libxl__backendpath_parse_domid.patch
 fd9d3d113ecb74f8589a792e5461e61d2664939e601744671a064194e2c603533e01b3abd8ae42ce203916f9055b5438d481dd47236b84b20d8129b4159ee4ae  0003-libxl-Do-not-trust-frontend-in-libxl__devices_destro.patch
diff --git a/main/xen/xsa190-4.5-CVE-2016-7777.patch b/main/xen/xsa190-4.5-CVE-2016-7777.patch
new file mode 100644
index 0000000..d103295
--- /dev/null
+++ b/main/xen/xsa190-4.5-CVE-2016-7777.patch
@@ -0,0 +1,163 @@
+x86emul: honor guest CR0.TS and CR0.EM
+
+We must not emulate any instructions accessing respective registers
+when either of these flags is set in the guest view of the register, or
+else we may do so on data not belonging to the guest's current task.
+
+Being architecturally required behavior, the logic gets placed in the
+instruction emulator instead of hvmemul_get_fpu(). It should be noted,
+though, that hvmemul_get_fpu() being the only current handler for the
+get_fpu() callback, we don't have an active problem with CR4: Both
+CR4.OSFXSR and CR4.OSXSAVE get handled as necessary by that function.
+
+This is XSA-190.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/tools/tests/x86_emulator/test_x86_emulator.c
++++ b/tools/tests/x86_emulator/test_x86_emulator.c
+@@ -129,6 +129,22 @@ static inline uint64_t xgetbv(uint32_t x
+     (ebx & (1U << 5)) != 0; \
+ })
+ 
++static int read_cr(
++    unsigned int reg,
++    unsigned long *val,
++    struct x86_emulate_ctxt *ctxt)
++{
++    /* Fake just enough state for the emulator's _get_fpu() to be happy. */
++    switch ( reg )
++    {
++    case 0:
++        *val = 0x00000001; /* PE */
++        return X86EMUL_OKAY;
++    }
++
++    return X86EMUL_UNHANDLEABLE;
++}
++
+ int get_fpu(
+     void (*exception_callback)(void *, struct cpu_user_regs *),
+     void *exception_callback_arg,
+@@ -160,6 +176,7 @@ static struct x86_emulate_ops emulops =
+     .write      = write,
+     .cmpxchg    = cmpxchg,
+     .cpuid      = cpuid,
++    .read_cr    = read_cr,
+     .get_fpu    = get_fpu,
+ };
+ 
+--- a/xen/arch/x86/hvm/emulate.c
++++ b/xen/arch/x86/hvm/emulate.c
+@@ -1192,6 +1192,7 @@ static int hvmemul_get_fpu(
+     switch ( type )
+     {
+     case X86EMUL_FPU_fpu:
++    case X86EMUL_FPU_wait:
+         break;
+     case X86EMUL_FPU_mmx:
+         if ( !cpu_has_mmx )
+@@ -1199,7 +1200,6 @@ static int hvmemul_get_fpu(
+         break;
+     case X86EMUL_FPU_xmm:
+         if ( !cpu_has_xmm ||
+-             (curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_EM) ||
+              !(curr->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSFXSR) )
+             return X86EMUL_UNHANDLEABLE;
+         break;
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -373,6 +373,9 @@ typedef union {
+ 
+ /* Control register flags. */
+ #define CR0_PE    (1<<0)
++#define CR0_MP    (1<<1)
++#define CR0_EM    (1<<2)
++#define CR0_TS    (1<<3)
+ #define CR4_TSD   (1<<2)
+ 
+ /* EFLAGS bit definitions. */
+@@ -400,6 +403,7 @@ typedef union {
+ #define EXC_OF  4
+ #define EXC_BR  5
+ #define EXC_UD  6
++#define EXC_NM  7
+ #define EXC_TS 10
+ #define EXC_NP 11
+ #define EXC_SS 12
+@@ -684,10 +688,45 @@ static void fpu_handle_exception(void *_
+     regs->eip += fic->insn_bytes;
+ }
+ 
++static int _get_fpu(
++    enum x86_emulate_fpu_type type,
++    struct fpu_insn_ctxt *fic,
++    struct x86_emulate_ctxt *ctxt,
++    const struct x86_emulate_ops *ops)
++{
++    int rc;
++
++    fic->exn_raised = 0;
++
++    fail_if(!ops->get_fpu);
++    rc = ops->get_fpu(fpu_handle_exception, fic, type, ctxt);
++
++    if ( rc == X86EMUL_OKAY )
++    {
++        unsigned long cr0;
++
++        fail_if(!ops->read_cr);
++        rc = ops->read_cr(0, &cr0, ctxt);
++        if ( rc != X86EMUL_OKAY )
++            return rc;
++        if ( cr0 & CR0_EM )
++        {
++            generate_exception_if(type == X86EMUL_FPU_fpu, EXC_NM, -1);
++            generate_exception_if(type == X86EMUL_FPU_mmx, EXC_UD, -1);
++            generate_exception_if(type == X86EMUL_FPU_xmm, EXC_UD, -1);
++        }
++        generate_exception_if((cr0 & CR0_TS) &&
++                              (type != X86EMUL_FPU_wait || (cr0 & CR0_MP)),
++                              EXC_NM, -1);
++    }
++
++ done:
++    return rc;
++}
++
+ #define get_fpu(_type, _fic)                                    \
+-do{ (_fic)->exn_raised = 0;                                     \
+-    fail_if(ops->get_fpu == NULL);                              \
+-    rc = ops->get_fpu(fpu_handle_exception, _fic, _type, ctxt); \
++do {                                                            \
++    rc = _get_fpu(_type, _fic, ctxt, ops);                      \
+     if ( rc ) goto done;                                        \
+ } while (0)
+ #define put_fpu(_fic)                                           \
+@@ -2491,8 +2530,14 @@ x86_emulate(
+     }
+ 
+     case 0x9b:  /* wait/fwait */
+-        emulate_fpu_insn("fwait");
++    {
++        struct fpu_insn_ctxt fic = { .insn_bytes = 1 };
++
++        get_fpu(X86EMUL_FPU_wait, &fic);
++        asm volatile ( "fwait" ::: "memory" );
++        put_fpu(&fic);
+         break;
++    }
+ 
+     case 0x9c: /* pushf */
+         src.val = _regs.eflags;
+--- a/xen/arch/x86/x86_emulate/x86_emulate.h
++++ b/xen/arch/x86/x86_emulate/x86_emulate.h
+@@ -114,6 +114,7 @@ struct __packed segment_register {
+ /* FPU sub-types which may be requested via ->get_fpu(). */
+ enum x86_emulate_fpu_type {
+     X86EMUL_FPU_fpu, /* Standard FPU coprocessor instruction set */
++    X86EMUL_FPU_wait, /* WAIT/FWAIT instruction */
+     X86EMUL_FPU_mmx, /* MMX instruction set (%mm0-%mm7) */
+     X86EMUL_FPU_xmm, /* SSE instruction set (%xmm0-%xmm7/15) */
+     X86EMUL_FPU_ymm  /* AVX/XOP instruction set (%ymm0-%ymm7/15) */
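
The gating logic the _get_fpu() hunk above introduces can be read as a small
decision table over CR0 bits. Below is a minimal standalone sketch of that
table -- not Xen code; the enum names and the test harness are invented for
illustration, assuming the caller passes the guest's view of CR0:

    /* Standalone sketch of the XSA-190 gating rules (not Xen code). */
    #include <stdio.h>

    #define CR0_MP (1u << 1)
    #define CR0_EM (1u << 2)
    #define CR0_TS (1u << 3)

    enum fpu_type { FPU_fpu, FPU_wait, FPU_mmx, FPU_xmm };
    enum fault    { FAULT_NONE, FAULT_NM, FAULT_UD };

    /* Mirrors the checks _get_fpu() performs after reading guest CR0. */
    static enum fault fpu_gate(unsigned long cr0, enum fpu_type type)
    {
        if (cr0 & CR0_EM) {
            if (type == FPU_fpu)
                return FAULT_NM;   /* x87 insn with CR0.EM set -> #NM */
            if (type == FPU_mmx || type == FPU_xmm)
                return FAULT_UD;   /* MMX/SSE with CR0.EM set -> #UD */
        }
        /* WAIT/FWAIT faults on TS only when MP is also set. */
        if ((cr0 & CR0_TS) && (type != FPU_wait || (cr0 & CR0_MP)))
            return FAULT_NM;
        return FAULT_NONE;
    }

    int main(void)
    {
        printf("TS, x87:      %d\n", fpu_gate(CR0_TS, FPU_fpu));           /* 1 (#NM) */
        printf("TS, fwait:    %d\n", fpu_gate(CR0_TS, FPU_wait));          /* 0 */
        printf("TS|MP, fwait: %d\n", fpu_gate(CR0_TS | CR0_MP, FPU_wait)); /* 1 (#NM) */
        printf("EM, SSE:      %d\n", fpu_gate(CR0_EM, FPU_xmm));           /* 2 (#UD) */
        return 0;
    }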
diff --git a/main/xen/xsa191-4.6-CVE-2016-9386.patch b/main/xen/xsa191-4.6-CVE-2016-9386.patch
new file mode 100644
index 0000000..d661d0c
--- /dev/null
+++ b/main/xen/xsa191-4.6-CVE-2016-9386.patch
@@ -0,0 +1,138 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/hvm: Fix the handling of non-present segments
+
+In 32bit, the data segments may be NULL to indicate that the segment is
+ineligible for use.  In both 32bit and 64bit, the LDT selector may be NULL to
+indicate that the entire LDT is ineligible for use.  However, nothing in Xen
+actually checks for this condition when performing other segmentation
+checks.  (Note however that limit and writeability checks are correctly
+performed).
+
+Neither Intel nor AMD specify the exact behaviour of loading a NULL segment.
+Experimentally, AMD zeroes all attributes but leaves the base and limit
+unmodified.  Intel zeroes the base, sets the limit to 0xfffffff and resets the
+attributes to just .G and .D/B.
+
+The use of the segment information in the VMCB/VMCS is equivalent to a native
+pipeline interacting with the segment cache.  The present bit can therefore
+have a subtly different meaning, and it is now cooked to uniformly indicate
+whether the segment is usable or not.
+
+GDTR and IDTR don't have access rights like the other segments, but for
+consistency, they are treated as being present so no special casing is needed
+elsewhere in the segmentation logic.
+
+AMD hardware does not consider the present bit for %cs and %tr, and will
+function as if they were present.  They are therefore unconditionally set to
+present when reading information from the VMCB, to maintain the new meaning of
+usability.
+
+Intel hardware has a separate unusable bit in the VMCS segment attributes.
+This bit is inverted and stored in the present field, so the hvm code can work
+with architecturally-common state.
+
+This is XSA-191.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -3666,6 +3666,10 @@ int hvm_virtual_to_linear_addr(
+          * COMPATIBILITY MODE: Apply segment checks and add base.
+          */
+ 
++        /* Segment not valid for use (cooked meaning of .p)? */
++        if ( !reg->attr.fields.p )
++            return 0;
++
+         switch ( access_type )
+         {
+         case hvm_access_read:
+@@ -3871,6 +3875,10 @@ static int hvm_load_segment_selector(
+     hvm_get_segment_register(
+         v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
+ 
++    /* Segment not valid for use (cooked meaning of .p)? */
++    if ( !desctab.attr.fields.p )
++        goto fail;
++
+     /* Check against descriptor table limit. */
+     if ( ((sel & 0xfff8) + 7) > desctab.limit )
+         goto fail;
+--- a/xen/arch/x86/hvm/svm/svm.c
++++ b/xen/arch/x86/hvm/svm/svm.c
+@@ -620,6 +620,7 @@ static void svm_get_segment_register(str
+     {
+     case x86_seg_cs:
+         memcpy(reg, &vmcb->cs, sizeof(*reg));
++        reg->attr.fields.p = 1;
+         reg->attr.fields.g = reg->limit > 0xFFFFF;
+         break;
+     case x86_seg_ds:
+@@ -653,13 +654,16 @@ static void svm_get_segment_register(str
+     case x86_seg_tr:
+         svm_sync_vmcb(v);
+         memcpy(reg, &vmcb->tr, sizeof(*reg));
++        reg->attr.fields.p = 1;
+         reg->attr.fields.type |= 0x2;
+         break;
+     case x86_seg_gdtr:
+         memcpy(reg, &vmcb->gdtr, sizeof(*reg));
++        reg->attr.bytes = 0x80;
+         break;
+     case x86_seg_idtr:
+         memcpy(reg, &vmcb->idtr, sizeof(*reg));
++        reg->attr.bytes = 0x80;
+         break;
+     case x86_seg_ldtr:
+         svm_sync_vmcb(v);
+--- a/xen/arch/x86/hvm/vmx/vmx.c
++++ b/xen/arch/x86/hvm/vmx/vmx.c
+@@ -867,10 +867,12 @@ void vmx_get_segment_register(struct vcp
+     reg->sel = sel;
+     reg->limit = limit;
+ 
+-    reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
+-    /* Unusable flag is folded into Present flag. */
+-    if ( attr & (1u<<16) )
+-        reg->attr.fields.p = 0;
++    /*
++     * Fold VT-x representation into Xen's representation.  The Present bit is
++     * unconditionally set to the inverse of unusable.
++     */
++    reg->attr.bytes =
++        (!(attr & (1u << 16)) << 7) | (attr & 0x7f) | ((attr >> 4) & 0xf00);
+ 
+     /* Adjust for virtual 8086 mode */
+     if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr 
+@@ -950,11 +952,11 @@ static void vmx_set_segment_register(str
+         }
+     }
+ 
+-    attr = ((attr & 0xf00) << 4) | (attr & 0xff);
+-
+-    /* Not-present must mean unusable. */
+-    if ( !reg->attr.fields.p )
+-        attr |= (1u << 16);
++    /*
++     * Unfold Xen representation into VT-x representation.  The unusable bit
++     * is unconditionally set to the inverse of present.
++     */
++    attr = (!(attr & (1u << 7)) << 16) | ((attr & 0xf00) << 4) | (attr & 0xff);
+ 
+     /* VMX has strict consistency requirement for flag G. */
+     attr |= !!(limit >> 20) << 15;
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1209,6 +1209,10 @@ protmode_load_seg(
+                                  &desctab, ctxt)) )
+         return rc;
+ 
++    /* Segment not valid for use (cooked meaning of .p)? */
++    if ( !desctab.attr.fields.p )
++        goto raise_exn;
++
+     /* Check against descriptor table limit. */
+     if ( ((sel & 0xfff8) + 7) > desctab.limit )
+         goto raise_exn;
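
The VT-x hunks above are easiest to check as plain bit arithmetic: the VMCS
"unusable" bit (bit 16) folds into Xen's packed attribute field as an inverted
present bit (bit 7), and unfolding inverts it back. A standalone sketch -- the
helper names are invented here -- verifying that the two directions round-trip:

    /* Standalone sketch of the VT-x attribute fold/unfold above (not
     * Xen code).  VMCS bit 16 is "unusable"; Xen keeps "present" in
     * bit 7 of its packed attribute field. */
    #include <assert.h>
    #include <stdio.h>

    /* VMCS access rights -> Xen attrs: present = !unusable. */
    static unsigned int fold(unsigned int vmx)
    {
        return (!(vmx & (1u << 16)) << 7) | (vmx & 0x7f) | ((vmx >> 4) & 0xf00);
    }

    /* Xen attrs -> VMCS access rights: unusable = !present. */
    static unsigned int unfold(unsigned int xen)
    {
        return (!(xen & (1u << 7)) << 16) | ((xen & 0xf00) << 4) | (xen & 0xff);
    }

    int main(void)
    {
        unsigned int vmcs = 0xc09b;          /* a usable flat code segment */
        unsigned int xen  = fold(vmcs);

        assert(xen & (1u << 7));             /* present */
        assert(unfold(xen) == vmcs);         /* the two directions round-trip */
        assert(!(fold(vmcs | (1u << 16)) & (1u << 7))); /* unusable -> !present */

        printf("fold(%#x) = %#x\n", vmcs, xen);
        return 0;
    }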
diff --git a/main/xen/xsa192-4.5-CVE-2016-9382.patch b/main/xen/xsa192-4.5-CVE-2016-9382.patch
new file mode 100644
index 0000000..9ed94fc
--- /dev/null
+++ b/main/xen/xsa192-4.5-CVE-2016-9382.patch
@@ -0,0 +1,63 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/HVM: don't load LDTR with VM86 mode attrs during task switch
+
+Just like TR, LDTR is purely a protected mode facility and hence needs
+to be loaded accordingly. Also move its loading to where it
+architecurally belongs.
+
+This is XSA-192.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -3577,16 +3577,15 @@ static void hvm_unmap_entry(void *p)
+ }
+ 
+ static int hvm_load_segment_selector(
+-    enum x86_segment seg, uint16_t sel)
++    enum x86_segment seg, uint16_t sel, unsigned int eflags)
+ {
+     struct segment_register desctab, cs, segr;
+     struct desc_struct *pdesc, desc;
+     u8 dpl, rpl, cpl;
+     int fault_type = TRAP_invalid_tss;
+-    struct cpu_user_regs *regs = guest_cpu_user_regs();
+     struct vcpu *v = current;
+ 
+-    if ( regs->eflags & X86_EFLAGS_VM )
++    if ( eflags & X86_EFLAGS_VM )
+     {
+         segr.sel = sel;
+         segr.base = (uint32_t)sel << 4;
+@@ -3829,6 +3828,8 @@ void hvm_task_switch(
+     if ( rc != HVMCOPY_okay )
+         goto out;
+ 
++    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, 0) )
++        goto out;
+ 
+     if ( hvm_set_cr3(tss.cr3) )
+         goto out;
+@@ -3851,13 +3852,12 @@ void hvm_task_switch(
+     }
+ 
+     exn_raised = 0;
+-    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt) ||
+-         hvm_load_segment_selector(x86_seg_es, tss.es) ||
+-         hvm_load_segment_selector(x86_seg_cs, tss.cs) ||
+-         hvm_load_segment_selector(x86_seg_ss, tss.ss) ||
+-         hvm_load_segment_selector(x86_seg_ds, tss.ds) ||
+-         hvm_load_segment_selector(x86_seg_fs, tss.fs) ||
+-         hvm_load_segment_selector(x86_seg_gs, tss.gs) )
++    if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_cs, tss.cs, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_ss, tss.ss, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_ds, tss.ds, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_fs, tss.fs, tss.eflags) ||
++         hvm_load_segment_selector(x86_seg_gs, tss.gs, tss.eflags) )
+         exn_raised = 1;
+ 
+     rc = hvm_copy_to_guest_virt(
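
The signature change above matters because hvm_load_segment_selector() takes a
real-mode shortcut when EFLAGS.VM is set; passing eflags explicitly lets the
task-switch path force LDTR through the protected-mode path (eflags == 0)
while the data segments still honor the incoming task's EFLAGS. A rough
standalone sketch of that split -- the struct and attribute values here are
simplified, this is not Xen code:

    /* Standalone sketch of the VM86 vs. protected-mode split behind the
     * XSA-192 fix.  With the fix, LDTR is loaded with eflags == 0 and so
     * can never take the VM86 path. */
    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFLAGS_VM (1u << 17)

    struct segreg {
        uint16_t sel;
        uint32_t base, limit, attr;
    };

    static void load_segment(struct segreg *r, uint16_t sel, unsigned int eflags)
    {
        if (eflags & X86_EFLAGS_VM) {
            /* Real-mode style segment: base = sel << 4, 64K limit,
             * writable data attributes -- nonsense for LDTR or TR. */
            r->sel   = sel;
            r->base  = (uint32_t)sel << 4;
            r->limit = 0xffff;
            r->attr  = 0xf3;
        } else {
            /* Protected mode: the descriptor-table lookup would go here. */
            r->sel  = sel;
            r->base = r->limit = r->attr = 0;
        }
    }

    int main(void)
    {
        struct segreg ldtr, ds;

        load_segment(&ldtr, 0x28, 0);             /* always protected mode */
        load_segment(&ds, 0x1234, X86_EFLAGS_VM); /* VM86 data segment */
        printf("ds.base = %#x\n", (unsigned)ds.base); /* 0x12340 */
        return 0;
    }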
diff --git a/main/xen/xsa193-4.5-CVE-2016-9385.patch b/main/xen/xsa193-4.5-CVE-2016-9385.patch
new file mode 100644
index 0000000..4906ce7
--- /dev/null
+++ b/main/xen/xsa193-4.5-CVE-2016-9385.patch
@@ -0,0 +1,65 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86/PV: writes of %fs and %gs base MSRs require canonical addresses
+
+Commit c42494acb2 ("x86: fix FS/GS base handling when using the
+fsgsbase feature") replaced the use of wrmsr_safe() on these paths
+without recognizing that wr{f,g}sbase() use just wrmsrl() and that the
+WR{F,G}SBASE instructions also raise #GP for non-canonical input.
+
+Similarly arch_set_info_guest() needs to prevent non-canonical
+addresses from getting stored into state later to be loaded by context
+switch code. For consistency also check stack pointers and LDT base.
+DR0..3, otoh, already get properly checked in set_debugreg() (albeit
+we discard the error there).
+
+The SHADOW_GS_BASE check isn't strictly necessary, but I think we
+better avoid trying the WRMSR if we know it's going to fail.
+
+This is XSA-193.
+
+Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -741,7 +741,13 @@ int arch_set_info_guest(
+     {
+         if ( !compat )
+         {
+-            if ( !is_canonical_address(c.nat->user_regs.eip) ||
++            if ( !is_canonical_address(c.nat->user_regs.rip) ||
++                 !is_canonical_address(c.nat->user_regs.rsp) ||
++                 !is_canonical_address(c.nat->kernel_sp) ||
++                 (c.nat->ldt_ents && !is_canonical_address(c.nat->ldt_base)) ||
++                 !is_canonical_address(c.nat->fs_base) ||
++                 !is_canonical_address(c.nat->gs_base_kernel) ||
++                 !is_canonical_address(c.nat->gs_base_user) ||
+                  !is_canonical_address(c.nat->event_callback_eip) ||
+                  !is_canonical_address(c.nat->syscall_callback_eip) ||
+                  !is_canonical_address(c.nat->failsafe_callback_eip) )
+--- a/xen/arch/x86/traps.c
++++ b/xen/arch/x86/traps.c
+@@ -2439,19 +2439,19 @@ static int emulate_privileged_op(struct
+         switch ( (u32)regs->ecx )
+         {
+         case MSR_FS_BASE:
+-            if ( is_pv_32on64_vcpu(v) )
++            if ( is_pv_32on64_vcpu(v) || !is_canonical_address(msr_content) )
+                 goto fail;
+             wrfsbase(msr_content);
+             v->arch.pv_vcpu.fs_base = msr_content;
+             break;
+         case MSR_GS_BASE:
+-            if ( is_pv_32on64_vcpu(v) )
++            if ( is_pv_32on64_vcpu(v) || !is_canonical_address(msr_content) )
+                 goto fail;
+             wrgsbase(msr_content);
+             v->arch.pv_vcpu.gs_base_kernel = msr_content;
+             break;
+         case MSR_SHADOW_GS_BASE:
+-            if ( is_pv_32on64_vcpu(v) )
++            if ( is_pv_32on64_vcpu(v) || !is_canonical_address(msr_content) )
+                 goto fail;
+             if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) )
+                 goto fail;
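
For reference, a canonical 48-bit virtual address is one whose bits 63..47 all
equal bit 47. A standalone sketch of such a predicate (the name is invented;
Xen's is_canonical_address() is equivalent in spirit):

    /* Standalone canonical-address check for 48-bit virtual addresses. */
    #include <assert.h>
    #include <stdint.h>

    static int is_canonical(uint64_t addr)
    {
        /* Sign-extend from bit 47 and compare with the original. */
        return (uint64_t)((int64_t)(addr << 16) >> 16) == addr;
    }

    int main(void)
    {
        assert(is_canonical(0x00007fffffffffffULL));  /* top of lower half */
        assert(is_canonical(0xffff800000000000ULL));  /* bottom of upper half */
        assert(!is_canonical(0x0000800000000000ULL)); /* in the hole: WRFSBASE would #GP */
        return 0;
    }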
diff --git a/main/xen/xsa195-CVE-2016-9383.patch b/main/xen/xsa195-CVE-2016-9383.patch
new file mode 100644
index 0000000..a193a5c
--- /dev/null
+++ b/main/xen/xsa195-CVE-2016-9383.patch
@@ -0,0 +1,45 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86emul: fix huge bit offset handling
+
+We must never chop off the high 32 bits.
+
+This is XSA-195.
+
+Reported-by: George Dunlap <george.dunlap@citrix.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -2549,6 +2549,12 @@ x86_emulate(
+         else
+         {
+             /*
++             * Instructions such as bt can reference an arbitrary offset from
++             * their memory operand, but the instruction doing the actual
++             * emulation needs the appropriate op_bytes read from memory.
++             * Adjust both the source register and memory operand to make an
++             * equivalent instruction.
++             *
+              * EA       += BitOffset DIV op_bytes*8
+              * BitOffset = BitOffset MOD op_bytes*8
+              * DIV truncates towards negative infinity.
+@@ -2560,14 +2566,15 @@ x86_emulate(
+                 src.val = (int32_t)src.val;
+             if ( (long)src.val < 0 )
+             {
+-                unsigned long byte_offset;
+-                byte_offset = op_bytes + (((-src.val-1) >> 3) & ~(op_bytes-1));
++                unsigned long byte_offset =
++                    op_bytes + (((-src.val - 1) >> 3) & ~(op_bytes - 1L));
++
+                 ea.mem.off -= byte_offset;
+                 src.val = (byte_offset << 3) + src.val;
+             }
+             else
+             {
+-                ea.mem.off += (src.val >> 3) & ~(op_bytes - 1);
++                ea.mem.off += (src.val >> 3) & ~(op_bytes - 1L);
+                 src.val &= (op_bytes << 3) - 1;
+             }
+         }
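
The `1L` suffix in the hunk above is the whole fix: with a 32-bit op_bytes,
~(op_bytes - 1) is a 32-bit mask, and ANDing it into a 64-bit offset
zero-extends the mask and chops the high 32 bits. A standalone demonstration
on an LP64 target (the values are made up for illustration):

    /* Standalone demonstration of the truncation fixed by XSA-195. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int op_bytes = 8;
        long bit_off = 0x123456789aL;      /* a huge BT bit offset */

        long bad  = (bit_off >> 3) & ~(op_bytes - 1);  /* mask widens to 0xfffffff8 */
        long good = (bit_off >> 3) & ~(op_bytes - 1L); /* mask is 64 bits wide */

        printf("bad  = %#lx\n", bad);   /* 0x468acf10  -- high bits lost */
        printf("good = %#lx\n", good);  /* 0x2468acf10 */
        return 0;
    }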
diff --git a/main/xen/xsa197-4.4-qemuu-CVE-2016-9381.patch b/main/xen/xsa197-4.4-qemuu-CVE-2016-9381.patch
new file mode 100644
index 0000000..07286d3
--- /dev/null
+++ b/main/xen/xsa197-4.4-qemuu-CVE-2016-9381.patch
@@ -0,0 +1,63 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: xen: fix ioreq handling
+
+Avoid double fetches and bounds check size to avoid overflowing
+internal variables.
+
+This is XSA-197.
+
+Reported-by: yanghongke <yanghongke@huawei.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+
+--- a/tools/qemu-xen/xen-all.c
++++ b/tools/qemu-xen/xen-all.c
+@@ -705,6 +705,10 @@ static void cpu_ioreq_pio(ioreq_t *req)
+ {
+     uint32_t i;
+ 
++    if (req->size > sizeof(uint32_t)) {
++        hw_error("PIO: bad size (%u)", req->size);
++    }
++
+     if (req->dir == IOREQ_READ) {
+         if (!req->data_is_ptr) {
+             req->data = do_inp(req->addr, req->size);
+@@ -734,6 +738,10 @@ static void cpu_ioreq_move(ioreq_t *req)
+ {
+     uint32_t i;
+ 
++    if (req->size > sizeof(req->data)) {
++        hw_error("MMIO: bad size (%u)", req->size);
++    }
++
+     if (!req->data_is_ptr) {
+         if (req->dir == IOREQ_READ) {
+             for (i = 0; i < req->count; i++) {
+@@ -809,11 +817,13 @@ static int handle_buffered_iopage(XenIOS
+         req.df = 1;
+         req.type = buf_req->type;
+         req.data_is_ptr = 0;
++        xen_rmb();
+         qw = (req.size == 8);
+         if (qw) {
+             buf_req = &state->buffered_io_page->buf_ioreq[
+                 (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
+             req.data |= ((uint64_t)buf_req->data) << 32;
++            xen_rmb();
+         }
+ 
+         handle_ioreq(&req);
+@@ -845,7 +855,11 @@ static void cpu_handle_ioreq(void *opaqu
+ 
+     handle_buffered_iopage(state);
+     if (req) {
+-        handle_ioreq(req);
++        ioreq_t copy = *req;
++
++        xen_rmb();
++        handle_ioreq(&copy);
++        req->data = copy.data;
+ 
+         if (req->state != STATE_IOREQ_INPROCESS) {
+             fprintf(stderr, "Badness in I/O request ... not in service?!: "
diff --git a/main/xen/xsa197-4.5-qemut-CVE-2016-9381.patch b/main/xen/xsa197-4.5-qemut-CVE-2016-9381.patch
new file mode 100644
index 0000000..9c428fe
--- /dev/null
+++ b/main/xen/xsa197-4.5-qemut-CVE-2016-9381.patch
@@ -0,0 +1,65 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: xen: fix ioreq handling
+
+Avoid double fetches and bounds check size to avoid overflowing
+internal variables.
+
+This is XSA-197.
+
+Reported-by: yanghongke <yanghongke@huawei.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Ian Jackson <ian.jackson@eu.citrix.com>
+
+--- a/tools/qemu-xen-traditional/i386-dm/helper2.c
++++ b/tools/qemu-xen-traditional/i386-dm/helper2.c
+@@ -374,6 +374,11 @@ static void cpu_ioreq_pio(CPUState *env,
+ {
+     uint32_t i;
+ 
++    if (req->size > sizeof(unsigned long)) {
++        fprintf(stderr, "PIO: bad size (%u)\n", req->size);
++        exit(-1);
++    }
++
+     if (req->dir == IOREQ_READ) {
+         if (!req->data_is_ptr) {
+             req->data = do_inp(env, req->addr, req->size);
+@@ -403,6 +408,11 @@ static void cpu_ioreq_move(CPUState *env
+ {
+     uint32_t i;
+ 
++    if (req->size > sizeof(req->data)) {
++        fprintf(stderr, "MMIO: bad size (%u)\n", req->size);
++        exit(-1);
++    }
++
+     if (!req->data_is_ptr) {
+         if (req->dir == IOREQ_READ) {
+             for (i = 0; i < req->count; i++) {
+@@ -506,11 +516,13 @@ static int __handle_buffered_iopage(CPUS
+         req.df = 1;
+         req.type = buf_req->type;
+         req.data_is_ptr = 0;
++        xen_rmb();
+         qw = (req.size == 8);
+         if (qw) {
+             buf_req = &buffered_io_page->buf_ioreq[
+                 (buffered_io_page->read_pointer+1) % IOREQ_BUFFER_SLOT_NUM];
+             req.data |= ((uint64_t)buf_req->data) << 32;
++            xen_rmb();
+         }
+ 
+         __handle_ioreq(env, &req);
+@@ -543,7 +555,11 @@ static void cpu_handle_ioreq(void *opaqu
+ 
+     __handle_buffered_iopage(env);
+     if (req) {
+-        __handle_ioreq(env, req);
++        ioreq_t copy = *req;
++
++        xen_rmb();
++        __handle_ioreq(env, &copy);
++        req->data = copy.data;
+ 
+         if (req->state != STATE_IOREQ_INPROCESS) {
+             fprintf(logfile, "Badness in I/O request ... not in service?!: "
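
Both QEMU variants of the XSA-197 fix apply the same pattern: snapshot the
guest-shared request into a private copy, issue a read barrier, then validate
and act only on the copy, writing back nothing but the result field. A
condensed standalone sketch -- not QEMU code; the struct layout is simplified
and a C11 acquire fence stands in for xen_rmb():

    /* Standalone sketch of the XSA-197 double-fetch defence. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t addr, data;
        uint32_t size;
        uint8_t  dir, data_is_ptr, state;
    } ioreq_t;

    /* Lives in a page shared with a (potentially malicious) guest. */
    static ioreq_t shared_ring[1];

    static void handle_ioreq(ioreq_t *req)
    {
        /* Validate the private copy: the guest can no longer flip
         * req->size between this check and any later use. */
        if (req->size > sizeof(req->data)) {
            fprintf(stderr, "bad size (%u)\n", (unsigned)req->size);
            return;
        }
        /* ... act on req->addr / req->size / req->data ... */
    }

    int main(void)
    {
        ioreq_t copy = shared_ring[0];             /* snapshot the shared entry */
        atomic_thread_fence(memory_order_acquire); /* xen_rmb() stand-in */

        handle_ioreq(&copy);
        shared_ring[0].data = copy.data;           /* write back only the result */
        return 0;
    }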
diff --git a/main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch b/main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch
new file mode 100644
index 0000000..dbf7084
--- /dev/null
+++ b/main/xen/xsa198-CVE-2016-9379-CVE-2016-9380.patch
@@ -0,0 +1,62 @@
+From 71a389ae940bc52bf897a6e5becd73fd8ede94c5 Mon Sep 17 00:00:00 2001
+From: Ian Jackson <ian.jackson@eu.citrix.com>
+Date: Thu, 3 Nov 2016 16:37:40 +0000
+Subject: [PATCH] pygrub: Properly quote results, when returning them to the
+ caller:
+
+* When the caller wants sexpr output, use `repr()'
+  This is what Xend expects.
+
+  The returned S-expressions are now escaped and quoted by Python,
+  generally using '...'.  Previously kernel and ramdisk were unquoted
+  and args was quoted with "..." but without proper escaping.  This
+  change may break toolstacks which do not properly dequote the
+  returned S-expressions.
+
+* When the caller wants "simple" output, crash if the delimiter is
+  contained in the returned value.
+
+  With --output-format=simple it does not seem like this could ever
+  happen, because the bootloader config parsers all take line-based
+  input from the various bootloader config files.
+
+  With --output-format=simple0, this can happen if the bootloader
+  config file contains nul bytes.
+
+This is XSA-198.
+
+Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
+Tested-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+---
+ tools/pygrub/src/pygrub | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/tools/pygrub/src/pygrub b/tools/pygrub/src/pygrub
+index 40f9584..dd0c8f7 100755
+--- a/tools/pygrub/src/pygrub
++++ b/tools/pygrub/src/pygrub
+@@ -721,14 +721,17 @@ def sniff_netware(fs, cfg):
+     return cfg
+ 
+ def format_sxp(kernel, ramdisk, args):
+-    s = "linux (kernel %s)" % kernel
++    s = "linux (kernel %s)" % repr(kernel)
+     if ramdisk:
+-        s += "(ramdisk %s)" % ramdisk
++        s += "(ramdisk %s)" % repr(ramdisk)
+     if args:
+-        s += "(args \"%s\")" % args
++        s += "(args %s)" % repr(args)
+     return s
+                 
+ def format_simple(kernel, ramdisk, args, sep):
++    for check in (kernel, ramdisk, args):
++        if check is not None and sep in check:
++            raise RuntimeError, "simple format cannot represent delimiter-containing value"
+     s = ("kernel %s" % kernel) + sep
+     if ramdisk:
+         s += ("ramdisk %s" % ramdisk) + sep
+-- 
+2.1.4
+
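
The simple-format half of the pygrub fix above is a generic rule: never emit a
value containing the record separator the consumer will split on. A standalone
C rendering of that check (the function names are invented; pygrub itself is
Python):

    /* Standalone sketch of the simple-format delimiter check. */
    #include <stdio.h>
    #include <string.h>

    /* Refuse to emit a value containing the record separator: the
     * consumer splits on it and would mis-parse attacker-chosen text. */
    static int emit_simple(const char *key, const char *val, char sep)
    {
        if (val == NULL)
            return 0;                 /* optional field, skip */
        if (memchr(val, sep, strlen(val)))
            return -1;                /* cannot be represented */
        printf("%s %s%c", key, val, sep);
        return 0;
    }

    int main(void)
    {
        if (emit_simple("kernel", "/vmlinuz-4.4.4", '\n') ||
            emit_simple("args", "root=/dev/xvda1\nkernel /evil", '\n'))
            fprintf(stderr, "refusing to emit delimiter-containing value\n");
        return 0;
    }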
-- 
2.2.1