
[alpine-aports] [PATCH] main/xen: security fixes for XSA-246 and XSA-247

From: Daniel Sabogal <dsabogalcc@gmail.com>
Date: Wed, 29 Nov 2017 01:26:42 -0500

---
 main/xen/APKBUILD           |  12 ++-
 main/xen/xsa246-4.9.patch   |  74 +++++++++++++++++++
 main/xen/xsa247-4.9-1.patch | 176 ++++++++++++++++++++++++++++++++++++++++++++
 main/xen/xsa247-4.9-2.patch | 109 +++++++++++++++++++++++++++
 4 files changed, 370 insertions(+), 1 deletion(-)
 create mode 100644 main/xen/xsa246-4.9.patch
 create mode 100644 main/xen/xsa247-4.9-1.patch
 create mode 100644 main/xen/xsa247-4.9-2.patch
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index e128a57be4..bb02b2bee9 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
 # Maintainer: William Pitcock <nenolod@dereferenced.org>
 pkgname=xen
 pkgver=4.9.1
-pkgrel=0
+pkgrel=1
 pkgdesc="Xen hypervisor"
 url="http://www.xen.org/"
 arch="x86_64 armhf aarch64"
@@ -98,6 +98,9 @@ options="!strip"
 #     - CVE-2017-15594 XSA-244
 #   4.9.0-r7:
 #     - CVE-2017-15597 XSA-236
+#   4.9.1-r1:
+#     - XSA-246
+#     - XSA-247
 
 case "$CARCH" in
 x86*)
@@ -145,6 +148,10 @@ source="https://downloads.xenproject.org/release/$pkgname/$pkgver/$pkgname-$pkgv
 	http://xenbits.xen.org/xen-extfiles/zlib-$_ZLIB_VERSION.tar.gz
 	http://xenbits.xen.org/xen-extfiles/ipxe-git-$_IPXE_GIT_TAG.tar.gz
 
+	xsa246-4.9.patch
+	xsa247-4.9-1.patch
+	xsa247-4.9-2.patch
+
 	qemu-coroutine-gthread.patch
 	qemu-xen_paths.patch
 
@@ -403,6 +410,9 @@ c2bc9ffc8583aeae71cee9ddcc4418969768d4e3764d47307da54f93981c0109fb07d84b061b3a36
 4928b5b82f57645be9408362706ff2c4d9baa635b21b0d41b1c82930e8c60a759b1ea4fa74d7e6c7cae1b7692d006aa5cb72df0c3b88bf049779aa2b566f9d35  tpm_emulator-0.7.4.tar.gz
 021b958fcd0d346c4ba761bcf0cc40f3522de6186cf5a0a6ea34a70504ce9622b1c2626fce40675bc8282cf5f5ade18473656abc38050f72f5d6480507a2106e  zlib-1.2.3.tar.gz
 82ba65e1c676d32b29c71e6395c9506cab952c8f8b03f692e2b50133be8f0c0146d0f22c223262d81a4df579986fde5abc6507869f4965be4846297ef7b4b890  ipxe-git-827dd1bfee67daa683935ce65316f7e0f057fe1c.tar.gz
+b00f42d2069f273e204698177d2c36950cee759a92dfe7833c812ddff4dedde2c4a842980927ec4fc46d1f54b49879bf3a3681c6faf30b72fb3ad6a7eba060b2  xsa246-4.9.patch
+c5e064543048751fda86ce64587493518da87d219ff077abb83ac13d8381ceb29f1b6479fc0b761b8f7a04c8c70203791ac4a8cc79bbc6f4dcfa6661c4790c5e  xsa247-4.9-1.patch
+71aefbe27cbd1d1d363b7d5826c69a238e4aad2958a1c6da330ae5daee791f54ce1d01fb79db84ed4248ab8b1593c9c28c3de5108f4d0953b04f7819af23a1d1  xsa247-4.9-2.patch
 c3c46f232f0bd9f767b232af7e8ce910a6166b126bd5427bb8dc325aeb2c634b956de3fc225cab5af72649070c8205cc8e1cab7689fc266c204f525086f1a562  qemu-coroutine-gthread.patch
 1936ab39a1867957fa640eb81c4070214ca4856a2743ba7e49c0cd017917071a9680d015f002c57fa7b9600dbadd29dcea5887f50e6c133305df2669a7a933f3  qemu-xen_paths.patch
 f095ea373f36381491ad36f0662fb4f53665031973721256b23166e596318581da7cbb0146d0beb2446729adfdb321e01468e377793f6563a67d68b8b0f7ffe3  hotplug-vif-vtrill.patch
diff --git a/main/xen/xsa246-4.9.patch b/main/xen/xsa246-4.9.patch
new file mode 100644
index 0000000000..6370a10625
--- /dev/null
+++ b/main/xen/xsa246-4.9.patch
@@ -0,0 +1,74 @@
+From: Julien Grall <julien.grall@linaro.org>
+Subject: x86/pod: prevent infinite loop when shattering large pages
+
+When populating pages, the PoD may need to split large ones using
+p2m_set_entry and request the caller to retry (see ept_get_entry for
+instance).
+
+p2m_set_entry may fail to shatter if it is not possible to allocate
+memory for the new page table. However, the error is not propagated,
+leaving the callers to retry the PoD operation indefinitely.
+
+Prevent the infinite loop by returning an error when it is not possible to
+shatter the large mapping.
+
+This is XSA-246.
+
+Signed-off-by: Julien Grall <julien.grall@linaro.org>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
+          * NOTE: In a fine-grained p2m locking scenario this operation
+          * may need to promote its locking from gfn->1g superpage
+          */
+-        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+-                      p2m_populate_on_demand, p2m->default_access);
+-        return 0;
++        return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
++                             p2m_populate_on_demand, p2m->default_access);
+     }
+ 
+     /* Only reclaim if we're in actual need of more cache. */
+@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai
+ 
+     gfn_aligned = (gfn >> order) << order;
+ 
+-    p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+-                  p2m->default_access);
++    if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
++                       p2m->default_access) )
++    {
++        p2m_pod_cache_add(p2m, p, order);
++        goto out_fail;
++    }
+ 
+     for( i = 0; i < (1UL << order); i++ )
+     {
+@@ -1150,13 +1153,18 @@ remap_and_retry:
+     BUG_ON(order != PAGE_ORDER_2M);
+     pod_unlock(p2m);
+ 
+-    /* Remap this 2-meg region in singleton chunks */
+-    /* NOTE: In a p2m fine-grained lock scenario this might
+-     * need promoting the gfn lock from gfn->2M superpage */
++    /*
++     * Remap this 2-meg region in singleton chunks. See the comment on the
++     * 1G page splitting path above for why a single call suffices.
++     *
++     * NOTE: In a p2m fine-grained lock scenario this might
++     * need promoting the gfn lock from gfn->2M superpage.
++     */
+     gfn_aligned = (gfn>>order)<<order;
+-    for(i=0; i<(1<<order); i++)
+-        p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
+-                      p2m_populate_on_demand, p2m->default_access);
++    if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
++                       p2m_populate_on_demand, p2m->default_access) )
++        return -1;
++
+     if ( tb_init_done )
+     {
+         struct {
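
The failure mode this XSA-246 patch closes can be illustrated with a
minimal standalone C sketch. This is not Xen code; every function and
variable below is invented for illustration. The point is that a caller
keeps retrying demand-population as long as the shatter step reports
success, so silently swallowing an allocation failure turns the retry
into an infinite loop:

    /* Standalone illustration -- not Xen code; all names are invented. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool table_alloc_ok = false;  /* models an exhausted p2m pool */

    /* Models p2m_set_entry() shattering a large mapping into smaller ones. */
    static int shatter_large_page(void)
    {
        return table_alloc_ok ? 0 : -1;  /* an -ENOMEM-style error */
    }

    /* Pre-fix shape: the shatter result is dropped, success is reported. */
    static int demand_populate_buggy(void)
    {
        shatter_large_page();
        return 0;
    }

    /* Post-fix shape: propagate the failure so the caller stops retrying. */
    static int demand_populate_fixed(void)
    {
        return shatter_large_page();
    }

    int main(void)
    {
        int tries;

        /* Without the fix, only the safety cap ends this loop. */
        for ( tries = 0; tries < 1000 && demand_populate_buggy() == 0; tries++ )
            ;
        printf("buggy: still retrying after %d attempts\n", tries);

        /* With the fix, the first failed shatter ends the retry. */
        for ( tries = 0; tries < 1000 && demand_populate_fixed() == 0; tries++ )
            ;
        printf("fixed: stopped after %d attempts\n", tries);
        return 0;
    }
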
diff --git a/main/xen/xsa247-4.9-1.patch b/main/xen/xsa247-4.9-1.patch
new file mode 100644
index 0000000000..e86d5616c4
--- /dev/null
+++ b/main/xen/xsa247-4.9-1.patch
@@ -0,0 +1,176 @@
+From ad208b8b7e45fb2b7c572b86c61c26412609e82d Mon Sep 17 00:00:00 2001
+From: George Dunlap <george.dunlap@citrix.com>
+Date: Fri, 10 Nov 2017 16:53:54 +0000
+Subject: [PATCH 1/2] p2m: Always check to see if removing a p2m entry actually
+ worked
+
+The PoD zero-check functions speculatively remove memory from the p2m,
+then check to see if it's completely zeroed, before putting it in the
+cache.
+
+Unfortunately, the p2m_set_entry() calls may fail if the underlying
+pagetable structure needs to change and the domain has exhausted its
+p2m memory pool: for instance, if we're removing a 2MiB region out of
+a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
+region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
+case); and the return value is not checked.
+
+The underlying mfn will then be added into the PoD cache, and at some
+point mapped into another location in the p2m.  If the guest
+afterwards balloons out this memory, it will be freed to the hypervisor
+and potentially reused by another domain, in spite of the fact that
+the original domain still has writable mappings to it.
+
+There are several places where p2m_set_entry() shouldn't be able to
+fail, as it is guaranteed to write an entry of the same order that
+succeeded before.  Add a backstop of crashing the domain just in case,
+and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
+builds.
+
+While we're here, use PAGE_ORDER_2M rather than a magic constant.
+
+This is part of XSA-247.
+
+Reported-by: XXX PERSON <XXX EMAIL>
+Signed-off-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+v4:
+- Removed some trailing whitespace
+v3:
+- Reformat reset clause to be more compact
+- Make sure to set map[i] = NULL when unmapping in case we need to bail
+v2:
+- Crash a domain if a p2m_set_entry we think cannot fail fails anyway.
+---
+ xen/arch/x86/mm/p2m-pod.c | 77 +++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 61 insertions(+), 16 deletions(-)
+
+diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
+index 730a48f928..f2ed751892 100644
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -752,8 +752,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
+     }
+ 
+     /* Try to remove the page, restoring old mapping if it fails. */
+-    p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
+-                  p2m_populate_on_demand, p2m->default_access);
++    if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
++                       p2m_populate_on_demand, p2m->default_access) )
++        goto out;
++
+     p2m_tlb_flush_sync(p2m);
+ 
+     /* Make none of the MFNs are used elsewhere... for example, mapped
+@@ -810,9 +812,18 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
+     ret = SUPERPAGE_PAGES;
+ 
+ out_reset:
+-    if ( reset )
+-        p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
+-    
++    /*
++     * This p2m_set_entry() call shouldn't be able to fail, since the same order
++     * on the same gfn succeeded above.  If that turns out to be false, crashing
++     * the domain should be the safest way of making sure we don't leak memory.
++     */
++    if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
++                                type0, p2m->default_access) )
++    {
++        ASSERT_UNREACHABLE();
++        domain_crash(d);
++    }
++
+ out:
+     gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
+     return ret;
+@@ -869,19 +880,30 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
+         }
+ 
+         /* Try to remove the page, restoring old mapping if it fails. */
+-        p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
+-                      p2m_populate_on_demand, p2m->default_access);
++        if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
++                           p2m_populate_on_demand, p2m->default_access) )
++            goto skip;
+ 
+         /* See if the page was successfully unmapped.  (Allow one refcount
+          * for being allocated to a domain.) */
+         if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
+         {
++            /*
++             * If the previous p2m_set_entry call succeeded, this one shouldn't
++             * be able to fail.  If it does, crashing the domain should be safe.
++             */
++            if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
++                               types[i], p2m->default_access) )
++            {
++                ASSERT_UNREACHABLE();
++                domain_crash(d);
++                goto out_unmap;
++            }
++
++        skip:
+             unmap_domain_page(map[i]);
+             map[i] = NULL;
+ 
+-            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+-                types[i], p2m->default_access);
+-
+             continue;
+         }
+     }
+@@ -900,12 +922,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
+ 
+         unmap_domain_page(map[i]);
+ 
+-        /* See comment in p2m_pod_zero_check_superpage() re gnttab
+-         * check timing.  */
+-        if ( j < PAGE_SIZE/sizeof(*map[i]) )
++        map[i] = NULL;
++
++        /*
++         * See comment in p2m_pod_zero_check_superpage() re gnttab
++         * check timing.
++         */
++        if ( j < (PAGE_SIZE / sizeof(*map[i])) )
+         {
+-            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+-                types[i], p2m->default_access);
++            /*
++             * If the previous p2m_set_entry call succeeded, this one shouldn't
++             * be able to fail.  If it does, crashing the domain should be safe.
++             */
++            if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
++                               types[i], p2m->default_access) )
++            {
++                ASSERT_UNREACHABLE();
++                domain_crash(d);
++                goto out_unmap;
++            }
+         }
+         else
+         {
+@@ -929,7 +964,17 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
+             p2m->pod.entry_count++;
+         }
+     }
+-    
++
++    return;
++
++out_unmap:
++    /*
++     * Something went wrong, probably crashing the domain.  Unmap
++     * everything and return.
++     */
++    for ( i = 0; i < count; i++ )
++        if ( map[i] )
++            unmap_domain_page(map[i]);
+ }
+ 
+ #define POD_SWEEP_LIMIT 1024
+-- 
+2.15.0
+
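
The pattern this first XSA-247 patch enforces -- speculatively unmap,
verify, then either reclaim or restore, checking every map update --
can be sketched in a standalone C program. Again, this is not Xen code;
the names below are invented, and where the sketch asserts, the real
code calls ASSERT_UNREACHABLE() and domain_crash() instead:

    /* Standalone illustration -- not Xen code; all names are invented. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct page { int refcount; bool zeroed; bool mapped; };

    static bool out_of_table_memory = false;

    /* Models p2m_set_entry(): may fail when page tables must change. */
    static int set_mapping(struct page *pg, bool present)
    {
        if ( out_of_table_memory && present != pg->mapped )
            return -1;
        pg->mapped = present;
        return 0;
    }

    /* Returns true if the page was reclaimed into the PoD cache. */
    static bool zero_check(struct page *pg)
    {
        /* Speculatively remove the mapping; bail out if that fails. */
        if ( set_mapping(pg, false) )
            return false;

        if ( pg->refcount <= 1 && pg->zeroed )
            return true;                  /* safe to reclaim */

        /*
         * Restore the old mapping.  Rewriting an entry of the order we
         * just removed "cannot fail"; if it does anyway, leaking a
         * still-mapped page is worse than crashing the domain.
         */
        if ( set_mapping(pg, true) )
            assert(!"restore failed: would ASSERT_UNREACHABLE + domain_crash");
        return false;
    }

    int main(void)
    {
        /* A page still referenced elsewhere must not be reclaimed. */
        struct page pg = { .refcount = 2, .zeroed = false, .mapped = true };
        printf("reclaimed: %s\n", zero_check(&pg) ? "yes" : "no");
        return 0;
    }
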
diff --git a/main/xen/xsa247-4.9-2.patch b/main/xen/xsa247-4.9-2.patch
new file mode 100644
index 0000000000..13737a9bf2
--- /dev/null
+++ b/main/xen/xsa247-4.9-2.patch
@@ -0,0 +1,109 @@
+From d4bc7833707351a5341a6bdf04c752a028d9560d Mon Sep 17 00:00:00 2001
+From: George Dunlap <george.dunlap@citrix.com>
+Date: Fri, 10 Nov 2017 16:53:55 +0000
+Subject: [PATCH 2/2] p2m: Check return value of p2m_set_entry() when
+ decreasing reservation
+
+If the entire range specified to p2m_pod_decrease_reservation() is marked
+populate-on-demand, then it will make a single p2m_set_entry() call,
+reducing its PoD entry count.
+
+Unfortunately, in the right circumstances, this p2m_set_entry() call
+may fail.  In that case, repeated calls to decrease_reservation() may
+cause p2m->pod.entry_count to fall below zero, potentially tripping
+over BUG_ON()s to the contrary.
+
+Instead, check to see if the entry succeeded, and return false if not.
+The caller will then call guest_remove_page() on the gfns, which will
+return -EINVAL upon finding no valid memory there to return.
+
+Unfortunately, if the order > 0, the entry may have partially changed.
+A domain_crash() is probably the safest thing in that case.
+
+Other p2m_set_entry() calls in the same function should be fine,
+because they are writing the entry at its current order.  Nonetheless,
+check the return value and crash if our assumption turns out to be
+wrong.
+
+This is part of XSA-247.
+
+Reported-by: XXX PERSON <XXX EMAIL>
+Signed-off-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+v2: Crash the domain if we're not sure it's safe (or if we think it
+can't happen)
+---
+ xen/arch/x86/mm/p2m-pod.c | 42 +++++++++++++++++++++++++++++++++---------
+ 1 file changed, 33 insertions(+), 9 deletions(-)
+
+diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
+index f2ed751892..473d6a6dbf 100644
+--- a/xen/arch/x86/mm/p2m-pod.c
++++ b/xen/arch/x86/mm/p2m-pod.c
+@@ -555,11 +555,23 @@ p2m_pod_decrease_reservation(struct domain *d,
+ 
+     if ( !nonpod )
+     {
+-        /* All PoD: Mark the whole region invalid and tell caller
+-         * we're done. */
+-        p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
+-                      p2m->default_access);
+-        p2m->pod.entry_count-=(1<<order);
++        /*
++         * All PoD: Mark the whole region invalid and tell caller
++         * we're done.
++         */
++        if ( p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
++                           p2m->default_access) )
++        {
++            /*
++             * If this fails, we can't tell how much of the range was changed.
++             * Best to crash the domain unless we're sure a partial change is
++             * impossible.
++             */
++            if ( order != 0 )
++                domain_crash(d);
++            goto out_unlock;
++        }
++        p2m->pod.entry_count -= 1UL << order;
+         BUG_ON(p2m->pod.entry_count < 0);
+         ret = 1;
+         goto out_entry_check;
+@@ -600,8 +612,14 @@ p2m_pod_decrease_reservation(struct domain *d,
+         n = 1UL << cur_order;
+         if ( t == p2m_populate_on_demand )
+         {
+-            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+-                          p2m_invalid, p2m->default_access);
++            /* This shouldn't be able to fail */
++            if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
++                               p2m_invalid, p2m->default_access) )
++            {
++                ASSERT_UNREACHABLE();
++                domain_crash(d);
++                goto out_unlock;
++            }
+             p2m->pod.entry_count -= n;
+             BUG_ON(p2m->pod.entry_count < 0);
+             pod -= n;
+@@ -622,8 +640,14 @@ p2m_pod_decrease_reservation(struct domain *d,
+ 
+             page = mfn_to_page(mfn);
+ 
+-            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
+-                          p2m_invalid, p2m->default_access);
++            /* This shouldn't be able to fail */
++            if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
++                               p2m_invalid, p2m->default_access) )
++            {
++                ASSERT_UNREACHABLE();
++                domain_crash(d);
++                goto out_unlock;
++            }
+             p2m_tlb_flush_sync(p2m);
+             for ( j = 0; j < n; ++j )
+                 set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
+-- 
+2.15.0
+
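
The accounting hazard the second XSA-247 patch closes can likewise be
shown with a standalone C sketch (not Xen code; names invented): if the
"mark region invalid" step fails but the PoD entry count is decremented
anyway, repeated decrease-reservation calls drive the signed counter
below zero, which the hypervisor's BUG_ON(entry_count < 0) then trips:

    /* Standalone illustration -- not Xen code; all names are invented. */
    #include <stdio.h>

    static long pod_entry_count = 512;   /* entries actually present */

    /* Models p2m_set_entry(); fails once the region is already gone. */
    static int mark_region_invalid(int *region_present)
    {
        if ( !*region_present )
            return -1;
        *region_present = 0;
        return 0;
    }

    /* Pre-fix shape: the counter moves whether or not the update stuck. */
    static void decrease_reservation_buggy(int *region, long order)
    {
        mark_region_invalid(region);
        pod_entry_count -= 1L << order;  /* may underflow */
    }

    /* Post-fix shape: only account for entries that were really removed. */
    static int decrease_reservation_fixed(int *region, long order)
    {
        if ( mark_region_invalid(region) )
            return -1;                   /* caller falls back per-page */
        pod_entry_count -= 1L << order;
        return 0;
    }

    int main(void)
    {
        int region = 1;

        decrease_reservation_buggy(&region, 9);  /* removes 512 entries */
        decrease_reservation_buggy(&region, 9);  /* underflows the count */
        printf("buggy count: %ld\n", pod_entry_count);

        pod_entry_count = 512;
        region = 1;
        (void)decrease_reservation_fixed(&region, 9);
        (void)decrease_reservation_fixed(&region, 9);  /* fails cleanly */
        printf("fixed count: %ld\n", pod_entry_count);
        return 0;
    }
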
-- 
2.15.0