main/xen: security fix for multiple vulnerabilities. Fixes #5159
(CVE-2016-2270, XSA-154) (CVE-2015-8550, XSA-155) (CVE-2015-8339, CVE-2015-8340, XSA-159) (CVE-2015-8341, XSA-160) (CVE-2015-8555, XSA-165) (CVE-2016-1570, XSA-167) (CVE-2016-1571, XSA-168) (CVE-2015-8615, XSA-169) (CVE-2016-2271, XSA-170)
parent c1d177c44a
commit ccba2d08cc
@@ -3,7 +3,7 @@
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
pkgver=4.6.0
-pkgrel=4
+pkgrel=5
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64"
@@ -48,7 +48,18 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
xsa151.patch
xsa152.patch
xsa153-libxl.patch
+xsa154.patch
+xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch
+xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
+xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
xsa156.patch
+xsa159.patch
+xsa160.patch
+xsa165.patch
+xsa167.patch
+xsa168.patch
+xsa169.patch
+xsa170.patch

qemu-coroutine-gthread.patch
qemu-xen_paths.patch
@@ -243,7 +254,18 @@ ebd65969e47ea94480d031481521259f xsa150.patch
b9c287c042317017f201a45193fdcf17 xsa151.patch
161a985c52ca2db47c09ae3245f8bceb xsa152.patch
e5ddc6b5a2c7ef0437812ce39cb55034 xsa153-libxl.patch
+2109cf26a61f99158615d0e8566aa7d9 xsa154.patch
+8e87b1bcd1e5c057c8d7ad41010c27f1 xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch
+48be8e53712d8656549fcdf1a96ffdec xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
+21448f920d1643580e261ac3650d1ef9 xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
ea188fa0ada9e5217f166dc3f0b8102c xsa156.patch
+9dad98f18893ab696e7a26e5d2a707b2 xsa159.patch
+7c53a997967656b10a3b2494c3f5a96d xsa160.patch
+7f5cc2a2e8e7fe705ae8764595065ff0 xsa165.patch
+e3423c61854be1658ea7aa596594c2d1 xsa167.patch
+b837726ce186fa61cfe7238b225b0335 xsa168.patch
+0931b87a6b9ba846c5797dbbbacdf324 xsa169.patch
+e0fd8934b37592a6a3e6ab41a xsa170.patch
de1a3db370b87cfb0bddb51796b50315 qemu-coroutine-gthread.patch
08bfdf8caff5d631f53660bf3fd4edaf qemu-xen_paths.patch
e449bb3359b490804ffc7b0ae08d62a0 hotplug-vif-vtrill.patch
@@ -282,7 +304,18 @@ e01628400b81c4bb7bafba348f2ecb1fe80f16e3162cee5013e0be1d7311738b xsa149.patch
e247a9dbbe236ffa3c5aa5e2d41047fa67da80f2b0474eef3440b5b3da2d5617 xsa151.patch
596f51797aa591b5abd068ead03e21215cf70997c98a4a562392499afe47b81c xsa152.patch
f5cbc98cba758e10da0a01d9379012ec56b98a85a92bfeb0c6b8132d4b91ce77 xsa153-libxl.patch
+eec88c2a57466f83a81844cb7025f70c2b671d07a75d85487d4ed73cdabbb020 xsa154.patch
+e52467fcec73bcc86d3e96d06f8ca8085ae56a83d2c42a30c16bc3dc630d8f8a xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch
+eae34c8ccc096ad93a74190506b3d55020a88afb0cc504a3a514590e9fd746fd xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
+42780265014085a4221ad32b026214693d751789eb5219e2e83862c0006c66f4 xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
d92729ca9174f7d1d8c6fd31321d1a58696c0630e87420539c32f7718b9e8ee8 xsa156.patch
+05c35871c1430e9cfdbee049411b23fca6c64c5bc9f112d7508afe5cbd289cef xsa159.patch
+40362873b7fa2c1450596ef9ea23c73f80608b77ca50b89e62daf46c131fcee6 xsa160.patch
+4bb18f2e44f49f140932c2d1e956e2e28017439cbb0e76eb16a8af617c4112ac xsa165.patch
+2bd786cccfd13c6732d6db8afc9e18058465efcb1bc93f894c359e3a820d5403 xsa167.patch
+c95198a66485d6e538d113ce2b84630d77c15f597113c38fadd6bf1e24e4c8ec xsa168.patch
+b818922880313cdbc12ea68ae757da5eabed9b3c9e1f8acefe1653683545ccbe xsa169.patch
+77b4b14b2c93da5f68e724cf74e1616f7df2e78305f66d164b3de2d980221a9a xsa170.patch
3941f99b49c7e8dafc9fae8aad2136a14c6d84533cd542cc5f1040a41ef7c6fe qemu-coroutine-gthread.patch
e4e5e838e259a3116978aabbcebc1865a895179a7fcbf4bad195c83e9b4c0f98 qemu-xen_paths.patch
dd1e784bc455eb62cb85b3fa24bfc34f575ceaab9597ef6a2f1ee7ff7b3cae0a hotplug-vif-vtrill.patch
@@ -321,7 +354,18 @@ f6d1753641741c6d921ec6ba4acd9ac9df511ef1a7ca7c21fb3498a2b7b8758827b9d8cb19543ffd
d1d6f11ff4c108d57de408cd75a818eeb124b3788c480bee6eb46ffdb18ef53a5dd96588f961f3336881d38c07908fae7c4042d8ee7267704647b306180aaebf xsa151.patch
e442c062b6bcf54761784649d3b21df2b4e46b7e1d94ab7375e227e65d6741b5457a838e72569ab9e49fb0ca57063226652f9efd4331356b822d686829682faa xsa152.patch
a33a184fdb1588ee17ddaab53dd45f9e68b2523f99278de7e8a403b36ce2dd71efcccae1c94b4b196f5d83d6423766a23e48fbf0a6a2e1dd681313edb0d1c399 xsa153-libxl.patch
+fde4c58acb857bd4eec807a78bee356a02358174e8c52a66555a6ad9cf5670b43391429ff973e74d27ee43a27c338b89bc3e63d2d821ee85682d8799d3bdd35c xsa154.patch
+96574c07cc31b11cddbe90bbfd0ff92ec9a2aa52903f74258e1291c1dec91e85c65c18ce10ed85aa659e3c363a460375153f2f45f1bbc4cebcc904398518a8f4 xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch
+d64d7e0dd96e31fa45d9d9b0cad9c543484709d699d9ab2efe1992f9375e8e0d67b0164e9ea8d3e75998388964f2fbfd96b5520a4acf13804dcf8c3472e37791 xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
+cad6b571ccca123e2a797cf82669ad0fe2e1ec99b7a68396beb3a2279e2cf87d8f0cf75e22dcd98238dd5031b2c7e9cb86d02ecaa82ae973fba6d26b2acfb514 xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
a879a7c8f5a1a49d5c1dc9c80ca5a7086b68f5cfa1938819ec93f354f2ba916862e8a553822f0e8d004fe90cf389c37675fc2c523157ad8a2426f60dcc03715d xsa156.patch
+82a8cd774078b201e3ca854b2419d5c2e69d8168066dcf0cf2c7373b649a9c0325b568cb7434b0f50e10dcc965a0557e845309dd1ddb9925950f386b12249d5d xsa159.patch
+91819a014821ff1b468a0e116edf657ea4db64b095637da1886caa3b8b29ffda8d00915e808508d8ecd526be9ce325b7e9733c220fba2b2cfaaee0977b1d9454 xsa160.patch
+ecd47873290937ce13b6d4f23751c62a7fe16a5f0c9a3d895b91b8c9065829883333181755240298a1bb4cfdb8414d431422d1a2a7fbd390707b373d5ca1a9a9 xsa165.patch
+5e908dc801eb5d15c59156c6d3bbe24df21acb39ef3a337b43e0f5bc0bbeaee78c9dc8352880251379dddbe203acbd8762abee954ede25dfaf032c6959c8fe09 xsa167.patch
+c55ee924b21edf54ce3c873d952a20f32f851661a13514528d42d2ef36767cfa9e31b1a42a4e0f40ff1011c692c406155fcc59be0c43fd44973cd0a5acee2ac7 xsa168.patch
+5bc99d5b4e8e57852c88401c49cc97f82706763f88682ed8faad6344fb0e17782ed7ba063fd463c3da46e28994af11e575ce6e02aa957ff042e3c86269d15acc xsa169.patch
+09a6defca0f32319dddf4325fb0105a468517a7150c8a8ea287677b4a55f09bf776f5aa673bae22a0708537cf075d5e2143a24aa1b08629ef911a7cdfd8376f0 xsa170.patch
c3c46f232f0bd9f767b232af7e8ce910a6166b126bd5427bb8dc325aeb2c634b956de3fc225cab5af72649070c8205cc8e1cab7689fc266c204f525086f1a562 qemu-coroutine-gthread.patch
1936ab39a1867957fa640eb81c4070214ca4856a2743ba7e49c0cd017917071a9680d015f002c57fa7b9600dbadd29dcea5887f50e6c133305df2669a7a933f3 qemu-xen_paths.patch
f095ea373f36381491ad36f0662fb4f53665031973721256b23166e596318581da7cbb0146d0beb2446729adfdb321e01468e377793f6563a67d68b8b0f7ffe3 hotplug-vif-vtrill.patch

main/xen/xsa154.patch (new file, 359 lines)
@@ -0,0 +1,359 @@
x86: enforce consistent cachability of MMIO mappings

We've been told by Intel that inconsistent cachability between
multiple mappings of the same page can affect system stability only
when the affected page is an MMIO one. Since the stale data issue is
of no relevance to the hypervisor (since all guest memory accesses go
through proper accessors and validation), handling of RAM pages
remains unchanged here. Any MMIO mapped by domains however needs to be
done consistently (all cachable mappings or all uncachable ones), in
order to avoid Machine Check exceptions. Since converting existing
cachable mappings to uncachable (at the time an uncachable mapping
gets established) would in the PV case require tracking all mappings,
allow MMIO to only get mapped uncachable (UC, UC-, or WC).

This also implies that in the PV case we mustn't use the L1 PTE update
fast path when cachability flags get altered.

Since in the HVM case at least for now we want to continue honoring
pinned cachability attributes for pages not mapped by the hypervisor,
special case handling of r/o MMIO pages (forcing UC) gets added there.
Arguably the counterpart change to p2m-pt.c may not be necessary, since
UC- (which already gets enforced there) is probably strict enough.

Note that the shadow code changes include fixing the write protection
of r/o MMIO ranges: shadow_l1e_remove_flags() and its siblings, other
than l1e_remove_flags() and alike, return the new PTE (and hence
ignoring their return values makes them no-ops).

This is CVE-2016-2270 / XSA-154.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1080,6 +1080,15 @@ limit is ignored by Xen.

Specify if the MMConfig space should be enabled.

+### mmio-relax
+> `= <boolean> | all`
+
+> Default: `false`
+
+By default, domains may not create cached mappings to MMIO regions.
+This option relaxes the check for Domain 0 (or when using `all`, all PV
+domains), to permit the use of cacheable MMIO mappings.
+
### msi
> `= <boolean>`

--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -807,8 +807,17 @@ int epte_get_entry_emt(struct domain *d,
if ( v->domain != d )
v = d->vcpu ? d->vcpu[0] : NULL;

- if ( !mfn_valid(mfn_x(mfn)) )
+ if ( !mfn_valid(mfn_x(mfn)) ||
+ rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn),
+ mfn_x(mfn) + (1UL << order) - 1) )
+ {
+ *ipat = 1;
return MTRR_TYPE_UNCACHABLE;
+ }
+
+ if ( rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn),
+ mfn_x(mfn) + (1UL << order) - 1) )
+ return -1;

switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
{
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -107,6 +107,8 @@ static unsigned long p2m_type_to_flags(p
case p2m_mmio_direct:
if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
flags |= _PAGE_RW;
+ else
+ flags |= _PAGE_PWT;
return flags | P2M_BASE_FLAGS | _PAGE_PCD;
}
}
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -519,6 +519,7 @@ _sh_propagate(struct vcpu *v,
gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
u32 pass_thru_flags;
u32 gflags, sflags;
+ bool_t mmio_mfn;

/* We don't shadow PAE l3s */
ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
@@ -559,7 +560,10 @@ _sh_propagate(struct vcpu *v,
// mfn means that we can not usefully shadow anything, and so we
// return early.
//
- if ( !mfn_valid(target_mfn)
+ mmio_mfn = !mfn_valid(target_mfn)
+ || (level == 1
+ && page_get_owner(mfn_to_page(target_mfn)) == dom_io);
+ if ( mmio_mfn
&& !(level == 1 && (!shadow_mode_refcounts(d)
|| p2mt == p2m_mmio_direct)) )
{
@@ -577,7 +581,7 @@ _sh_propagate(struct vcpu *v,
_PAGE_RW | _PAGE_PRESENT);
if ( guest_supports_nx(v) )
pass_thru_flags |= _PAGE_NX_BIT;
- if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
+ if ( level == 1 && !shadow_mode_refcounts(d) && mmio_mfn )
pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
sflags = gflags & pass_thru_flags;

@@ -676,10 +680,14 @@ _sh_propagate(struct vcpu *v,
}

/* Read-only memory */
- if ( p2m_is_readonly(p2mt) ||
- (p2mt == p2m_mmio_direct &&
- rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn))) )
+ if ( p2m_is_readonly(p2mt) )
sflags &= ~_PAGE_RW;
+ else if ( p2mt == p2m_mmio_direct &&
+ rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn)) )
+ {
+ sflags &= ~(_PAGE_RW | _PAGE_PAT);
+ sflags |= _PAGE_PCD | _PAGE_PWT;
+ }

// protect guest page tables
//
@@ -1185,22 +1193,28 @@ static int shadow_set_l1e(struct domain
&& !sh_l1e_is_magic(new_sl1e) )
{
/* About to install a new reference */
- if ( shadow_mode_refcounts(d) ) {
+ if ( shadow_mode_refcounts(d) )
+ {
+#define PAGE_FLIPPABLE (_PAGE_RW | _PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
+ int rc;
+
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
- switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
+ switch ( rc = shadow_get_page_from_l1e(new_sl1e, d, new_type) )
{
default:
/* Doesn't look like a pagetable. */
flags |= SHADOW_SET_ERROR;
new_sl1e = shadow_l1e_empty();
break;
- case 1:
- shadow_l1e_remove_flags(new_sl1e, _PAGE_RW);
+ case PAGE_FLIPPABLE & -PAGE_FLIPPABLE ... PAGE_FLIPPABLE:
+ ASSERT(!(rc & ~PAGE_FLIPPABLE));
+ new_sl1e = shadow_l1e_flip_flags(new_sl1e, rc);
/* fall through */
case 0:
shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
break;
}
+#undef PAGE_FLIPPABLE
}
}

--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -99,6 +99,9 @@ static inline u32 shadow_l4e_get_flags(s
static inline shadow_l1e_t
shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
{ l1e_remove_flags(sl1e, flags); return sl1e; }
+static inline shadow_l1e_t
+shadow_l1e_flip_flags(shadow_l1e_t sl1e, u32 flags)
+{ l1e_flip_flags(sl1e, flags); return sl1e; }

static inline shadow_l1e_t shadow_l1e_empty(void)
{ return l1e_empty(); }
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -178,6 +178,18 @@ static uint32_t base_disallow_mask;
is_pv_domain(d)) ? \
L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))

+static s8 __read_mostly opt_mmio_relax;
+static void __init parse_mmio_relax(const char *s)
+{
+ if ( !*s )
+ opt_mmio_relax = 1;
+ else
+ opt_mmio_relax = parse_bool(s);
+ if ( opt_mmio_relax < 0 && strcmp(s, "all") )
+ opt_mmio_relax = 0;
+}
+custom_param("mmio-relax", parse_mmio_relax);
+
static void __init init_frametable_chunk(void *start, void *end)
{
unsigned long s = (unsigned long)start;
@@ -799,10 +811,7 @@ get_page_from_l1e(
if ( !mfn_valid(mfn) ||
(real_pg_owner = page_get_owner_and_reference(page)) == dom_io )
{
-#ifndef NDEBUG
- const unsigned long *ro_map;
- unsigned int seg, bdf;
-#endif
+ int flip = 0;

/* Only needed the reference to confirm dom_io ownership. */
if ( mfn_valid(mfn) )
@@ -836,24 +845,55 @@ get_page_from_l1e(
return -EINVAL;
}

- if ( !(l1f & _PAGE_RW) ||
- !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
- return 0;
+ if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
+ {
+ /* MMIO pages must not be mapped cachable unless requested so. */
+ switch ( opt_mmio_relax )
+ {
+ case 0:
+ break;
+ case 1:
+ if ( is_hardware_domain(l1e_owner) )
+ case -1:
+ return 0;
+ default:
+ ASSERT_UNREACHABLE();
+ }
+ }
+ else if ( l1f & _PAGE_RW )
+ {
#ifndef NDEBUG
- if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
- ((ro_map = pci_get_ro_map(seg)) != NULL &&
- test_bit(bdf, ro_map)) )
- printk(XENLOG_G_WARNING
- "d%d: Forcing read-only access to MFN %lx\n",
- l1e_owner->domain_id, mfn);
- else
- rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
- print_mmio_emul_range,
- &(struct mmio_emul_range_ctxt){
- .d = l1e_owner,
- .mfn = mfn });
+ const unsigned long *ro_map;
+ unsigned int seg, bdf;
+
+ if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
+ ((ro_map = pci_get_ro_map(seg)) != NULL &&
+ test_bit(bdf, ro_map)) )
+ printk(XENLOG_G_WARNING
+ "d%d: Forcing read-only access to MFN %lx\n",
+ l1e_owner->domain_id, mfn);
+ else
+ rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
+ print_mmio_emul_range,
+ &(struct mmio_emul_range_ctxt){
+ .d = l1e_owner,
+ .mfn = mfn });
#endif
- return 1;
+ flip = _PAGE_RW;
+ }
+
+ switch ( l1f & PAGE_CACHE_ATTRS )
+ {
+ case 0: /* WB */
+ flip |= _PAGE_PWT | _PAGE_PCD;
+ break;
+ case _PAGE_PWT: /* WT */
+ case _PAGE_PWT | _PAGE_PAT: /* WP */
+ flip |= _PAGE_PCD | (l1f & _PAGE_PAT);
+ break;
+ }
+
+ return flip;
}

if ( unlikely( (real_pg_owner != pg_owner) &&
@@ -1243,8 +1283,9 @@ static int alloc_l1_table(struct page_in
goto fail;
case 0:
break;
- case 1:
- l1e_remove_flags(pl1e[i], _PAGE_RW);
+ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
+ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
+ l1e_flip_flags(pl1e[i], ret);
break;
}

@@ -1759,8 +1800,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
return -EINVAL;
}

- /* Fast path for identical mapping, r/w and presence. */
- if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
+ /* Fast path for identical mapping, r/w, presence, and cachability. */
+ if ( !l1e_has_changed(ol1e, nl1e,
+ PAGE_CACHE_ATTRS | _PAGE_RW | _PAGE_PRESENT) )
{
adjust_guest_l1e(nl1e, pt_dom);
if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
@@ -1783,8 +1825,9 @@ static int mod_l1_entry(l1_pgentry_t *pl
return rc;
case 0:
break;
- case 1:
- l1e_remove_flags(nl1e, _PAGE_RW);
+ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
+ ASSERT(!(rc & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
+ l1e_flip_flags(nl1e, rc);
rc = 0;
break;
}
@@ -5000,6 +5043,7 @@ static int ptwr_emulated_update(
l1_pgentry_t pte, ol1e, nl1e, *pl1e;
struct vcpu *v = current;
struct domain *d = v->domain;
+ int ret;

/* Only allow naturally-aligned stores within the original %cr2 page. */
if ( unlikely(((addr^ptwr_ctxt->cr2) & PAGE_MASK) || (addr & (bytes-1))) )
@@ -5047,7 +5091,7 @@ static int ptwr_emulated_update(

/* Check the new PTE. */
nl1e = l1e_from_intpte(val);
- switch ( get_page_from_l1e(nl1e, d, d) )
+ switch ( ret = get_page_from_l1e(nl1e, d, d) )
{
default:
if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
@@ -5071,8 +5115,9 @@ static int ptwr_emulated_update(
break;
case 0:
break;
- case 1:
- l1e_remove_flags(nl1e, _PAGE_RW);
+ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
+ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
+ l1e_flip_flags(nl1e, ret);
break;
}

--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -157,6 +157,9 @@ static inline l4_pgentry_t l4e_from_padd
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))

+/* Flip flags in an existing L1 PTE. */
+#define l1e_flip_flags(x, flags) ((x).l1 ^= put_pte_flags(flags))
+
/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
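
The PV-side heart of xsa154 is the new return value of get_page_from_l1e(): instead of only signalling "strip _PAGE_RW", it now reports which PTE attribute bits to XOR so a cachable MMIO mapping degrades to UC, UC-, or WC. A minimal standalone C sketch of that switch; the bit values match x86 L1 PTEs and the case logic mirrors the hunk above, while the demo harness around it is illustrative only:

#include <stdio.h>

#define _PAGE_PWT 0x008u
#define _PAGE_PCD 0x010u
#define _PAGE_PAT 0x080u
#define PAGE_CACHE_ATTRS (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)

/* Mirrors the switch the patch adds to get_page_from_l1e(): which bits to
 * XOR into l1f so a cachable MMIO mapping becomes an uncachable type. */
static unsigned int mmio_cache_flip(unsigned int l1f)
{
    unsigned int flip = 0;

    switch ( l1f & PAGE_CACHE_ATTRS )
    {
    case 0:                         /* WB -> UC: set PWT|PCD */
        flip |= _PAGE_PWT | _PAGE_PCD;
        break;
    case _PAGE_PWT:                 /* WT -> UC: set PCD */
    case _PAGE_PWT | _PAGE_PAT:     /* WP -> UC: set PCD, clear PAT */
        flip |= _PAGE_PCD | (l1f & _PAGE_PAT);
        break;
    /* UC, UC-, and WC are already acceptable: nothing to flip. */
    }
    return flip;
}

int main(void)
{
    static const unsigned int attrs[] = {
        0, _PAGE_PWT, _PAGE_PWT | _PAGE_PAT, _PAGE_PCD,
        _PAGE_PCD | _PAGE_PWT, _PAGE_PAT,
    };
    for ( unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++ )
    {
        unsigned int f = mmio_cache_flip(attrs[i]);
        printf("attrs %#04x flip %#04x result %#04x\n",
               attrs[i], f, attrs[i] ^ f);
    }
    return 0;
}

Running it shows WB, WT, and WP requests being forced to UC, while UC, UC-, and WC pass through unchanged.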

main/xen/xsa155-xen-0001-xen-Add-RING_COPY_REQUEST.patch (new file, 56 lines)
@@ -0,0 +1,56 @@
From 12b11658a9d6a654a1e7acbf2f2d56ce9a396c86 Mon Sep 17 00:00:00 2001
From: David Vrabel <david.vrabel@citrix.com>
Date: Fri, 20 Nov 2015 11:59:05 -0500
Subject: [PATCH 1/3] xen: Add RING_COPY_REQUEST()

Using RING_GET_REQUEST() on a shared ring is easy to use incorrectly
(i.e., by not considering that the other end may alter the data in the
shared ring while it is being inspected). Safe usage of a request
generally requires taking a local copy.

Provide a RING_COPY_REQUEST() macro to use instead of
RING_GET_REQUEST() and an open-coded memcpy(). This takes care of
ensuring that the copy is done correctly regardless of any possible
compiler optimizations.

Use a volatile source to prevent the compiler from reordering or
omitting the copy.

This is part of XSA155.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
v2: Add comment about GCC bug.
---
xen/include/public/io/ring.h | 14 ++++++++++++++
1 file changed, 14 insertions(+)

diff --git a/xen/include/public/io/ring.h b/xen/include/public/io/ring.h
index ba9401b..801c0da 100644
--- a/xen/include/public/io/ring.h
+++ b/xen/include/public/io/ring.h
@@ -212,6 +212,20 @@ typedef struct __name##_back_ring __name##_back_ring_t
#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

+/*
+ * Get a local copy of a request.
+ *
+ * Use this in preference to RING_GET_REQUEST() so all processing is
+ * done on a local copy that cannot be modified by the other end.
+ *
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
+ * to be ineffective where _req is a struct which consists of only bitfields.
+ */
+#define RING_COPY_REQUEST(_r, _idx, _req) do { \
+ /* Use volatile to force the copy into _req. */ \
+ *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+} while (0)
+
#define RING_GET_RESPONSE(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

--
2.1.0
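
The bug class RING_COPY_REQUEST() closes is a double fetch: backend code validates a field read from the shared ring, then re-reads it for use, and the frontend can change it in between. A self-contained toy showing the safe pattern; struct req, TOY_RING_SIZE, and the COPY_REQUEST stand-in are hypothetical, only the volatile-copy idiom mirrors the macro above:

#include <stdint.h>
#include <stdio.h>

struct req { uint32_t id; uint32_t sector; };   /* toy request layout */

#define TOY_RING_SIZE 32
static struct req shared_ring[TOY_RING_SIZE];   /* imagine: guest-writable page */

/* Same idiom as RING_COPY_REQUEST(): one volatile-qualified copy, so the
 * compiler cannot turn later uses into fresh reads of the shared slot. */
#define COPY_REQUEST(_idx, _dst) \
    (*(_dst) = *(volatile struct req *)&shared_ring[(_idx) & (TOY_RING_SIZE - 1)])

static uint32_t table[8];

static void consume(unsigned int idx)
{
    struct req local;

    COPY_REQUEST(idx, &local);   /* fetch exactly once */
    if ( local.id >= 8 )         /* validate the private copy... */
        return;
    table[local.id]++;           /* ...and index with the very same value */
}

int main(void)
{
    shared_ring[0].id = 3;
    consume(0);
    printf("table[3] = %u\n", table[3]);
    return 0;
}

With RING_GET_REQUEST() instead, the bounds check and the array index would each read the shared slot directly, and the peer could swap in an out-of-range id between the two reads.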

main/xen/xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch (new file, 75 lines)
@@ -0,0 +1,75 @@
From 851ffb4eea917e2708c912291dea4d133026c0ac Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Fri, 20 Nov 2015 12:16:02 -0500
Subject: [PATCH 2/3] blktap2: Use RING_COPY_REQUEST

Instead of RING_GET_REQUEST. Using a local copy of the
ring (and also with proper memory barriers) will mean
we can do not have to worry about the compiler optimizing
the code and doing a double-fetch in the shared memory space.

This is part of XSA155.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

---
v2: Fix compile issues with tapdisk-vbd
---
tools/blktap2/drivers/block-log.c | 3 ++-
tools/blktap2/drivers/tapdisk-vbd.c | 8 ++++----
2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/tools/blktap2/drivers/block-log.c b/tools/blktap2/drivers/block-log.c
index 5330cdc..5f3bd35 100644
--- a/tools/blktap2/drivers/block-log.c
+++ b/tools/blktap2/drivers/block-log.c
@@ -494,11 +494,12 @@ static int ctl_kick(struct tdlog_state* s, int fd)
reqstart = s->bring.req_cons;
reqend = s->sring->req_prod;

+ xen_mb();
BDPRINTF("ctl: ring kicked (start = %u, end = %u)", reqstart, reqend);

while (reqstart != reqend) {
/* XXX actually submit these! */
- memcpy(&req, RING_GET_REQUEST(&s->bring, reqstart), sizeof(req));
+ RING_COPY_REQUEST(&s->bring, reqstart, &req);
BDPRINTF("ctl: read request %"PRIu64":%u", req.sector, req.count);
s->bring.req_cons = ++reqstart;

diff --git a/tools/blktap2/drivers/tapdisk-vbd.c b/tools/blktap2/drivers/tapdisk-vbd.c
index 6d1d94a..89ef9ed 100644
--- a/tools/blktap2/drivers/tapdisk-vbd.c
+++ b/tools/blktap2/drivers/tapdisk-vbd.c
@@ -1555,7 +1555,7 @@ tapdisk_vbd_pull_ring_requests(td_vbd_t *vbd)
int idx;
RING_IDX rp, rc;
td_ring_t *ring;
- blkif_request_t *req;
+ blkif_request_t req;
td_vbd_request_t *vreq;

ring = &vbd->ring;
@@ -1566,16 +1566,16 @@ tapdisk_vbd_pull_ring_requests(td_vbd_t *vbd)
xen_rmb();

for (rc = ring->fe_ring.req_cons; rc != rp; rc++) {
- req = RING_GET_REQUEST(&ring->fe_ring, rc);
+ RING_COPY_REQUEST(&ring->fe_ring, rc, &req);
++ring->fe_ring.req_cons;

- idx = req->id;
+ idx = req.id;
vreq = &vbd->request_list[idx];

ASSERT(list_empty(&vreq->next));
ASSERT(vreq->secs_pending == 0);

- memcpy(&vreq->req, req, sizeof(blkif_request_t));
+ memcpy(&vreq->req, &req, sizeof(blkif_request_t));
vbd->received++;
vreq->vbd = vbd;

--
2.1.4

main/xen/xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch (new file, 41 lines)
@@ -0,0 +1,41 @@
From c1fce65e2b720684ea6ba76ae59921542bd154bb Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Fri, 20 Nov 2015 12:22:14 -0500
Subject: [PATCH 3/3] libvchan: Read prod/cons only once.

We must ensure that the prod/cons are only read once and that
the compiler won't try to optimize the reads. That is split
the read of these in multiple instructions influencing later
branch code. As such insert barriers when fetching the cons
and prod index.

This is part of XSA155.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
tools/libvchan/io.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/tools/libvchan/io.c b/tools/libvchan/io.c
index 8a9629b..381cc05 100644
--- a/tools/libvchan/io.c
+++ b/tools/libvchan/io.c
@@ -117,6 +117,7 @@ static inline int send_notify(struct libxenvchan *ctrl, uint8_t bit)
static inline int raw_get_data_ready(struct libxenvchan *ctrl)
{
uint32_t ready = rd_prod(ctrl) - rd_cons(ctrl);
+ xen_mb(); /* Ensure 'ready' is read only once. */
if (ready > rd_ring_size(ctrl))
/* We have no way to return errors. Locking up the ring is
* better than the alternatives. */
@@ -158,6 +159,7 @@ int libxenvchan_data_ready(struct libxenvchan *ctrl)
static inline int raw_get_buffer_space(struct libxenvchan *ctrl)
{
uint32_t ready = wr_ring_size(ctrl) - (wr_prod(ctrl) - wr_cons(ctrl));
+ xen_mb(); /* Ensure 'ready' is read only once. */
if (ready > wr_ring_size(ctrl))
/* We have no way to return errors. Locking up the ring is
* better than the alternatives. */
--
2.1.0
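
Seen from the consumer side, the xen_mb() additions keep the compiler from re-reading the shared indices after the bounds check has passed. A hedged sketch of the same shape, with GCC's __sync_synchronize() standing in for xen_mb() and deliberately simplified error handling:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 1024u

/* Imagine these two indices live in a page shared with an untrusted peer
 * (what rd_prod()/rd_cons() return in libvchan). */
static uint32_t prod, cons;

static uint32_t data_ready(void)
{
    uint32_t ready = prod - cons;
    /* Full barrier, as xen_mb() in the patch: without it the compiler may
     * legally replace later uses of 'ready' with fresh reads of prod/cons,
     * re-fetching the peer-controlled values after the check below. */
    __sync_synchronize();
    if (ready > RING_SIZE)
        return 0;   /* simplified; the real code deliberately wedges the ring */
    return ready;
}

int main(void)
{
    prod = 5;
    printf("ready = %u\n", data_ready());
    return 0;
}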

main/xen/xsa159.patch (new file, 47 lines)
@@ -0,0 +1,47 @@
memory: fix XENMEM_exchange error handling

assign_pages() can fail due to the domain getting killed in parallel,
which should not result in a hypervisor crash.

Also delete a redundant put_gfn() - all relevant paths leading to the
"fail" label already do this (and there are also paths where it was
plain wrong). All of the put_gfn()-s got introduced by 51032ca058
("Modify naming of queries into the p2m"), including the otherwise
unneeded initializer for k (with even a kind of misleading comment -
the compiler warning could actually have served as a hint that the use
is wrong).

This is XSA-159.

Reported-by: Julien Grall <julien.grall@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>

--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -334,7 +334,7 @@ static long memory_exchange(XEN_GUEST_HA
PAGE_LIST_HEAD(out_chunk_list);
unsigned long in_chunk_order, out_chunk_order;
xen_pfn_t gpfn, gmfn, mfn;
- unsigned long i, j, k = 0; /* gcc ... */
+ unsigned long i, j, k;
unsigned int memflags = 0;
long rc = 0;
struct domain *d;
@@ -572,11 +572,12 @@ static long memory_exchange(XEN_GUEST_HA
fail:
/* Reassign any input pages we managed to steal. */
while ( (page = page_list_remove_head(&in_chunk_list)) )
- {
- put_gfn(d, gmfn + k--);
if ( assign_pages(d, page, 0, MEMF_no_refcount) )
- BUG();
- }
+ {
+ BUG_ON(!d->is_dying);
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+ put_page(page);
+ }

dying:
rcu_unlock_domain(d);

main/xen/xsa160.patch (new file, 69 lines)
@@ -0,0 +1,69 @@
From adcbd15b1aec8367f790774c998db199c9b577bf Mon Sep 17 00:00:00 2001
From: Ian Jackson <ian.jackson@eu.citrix.com>
Date: Wed, 18 Nov 2015 15:34:54 +0000
Subject: [PATCH] libxl: Fix bootloader-related virtual memory leak on pv
 build failure

The bootloader may call libxl__file_reference_map(), which mmap's the
pv_kernel and pv_ramdisk into process memory. This was only unmapped,
however, on the success path of libxl__build_pv(). If there were a
failure anywhere between libxl_bootloader.c:parse_bootloader_result()
and the end of libxl__build_pv(), the calls to
libxl__file_reference_unmap() would be skipped, leaking the mapped
virtual memory.

Ideally this would be fixed by adding the unmap calls to the
destruction path for libxl__domain_build_state. Unfortunately the
lifetime of the libxl__domain_build_state is opaque, and it doesn't
have a proper destruction path. But, the only thing in it that isn't
from the gc are these bootloader references, and they are only ever
set for one libxl__domain_build_state, the one which is
libxl__domain_create_state.build_state.

So we can clean up in the exit path from libxl__domain_create_*, which
always comes through domcreate_complete.

Remove the now-redundant unmaps in libxl__build_pv's success path.

This is XSA-160.

Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Tested-by: George Dunlap <george.dunlap@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
---
tools/libxl/libxl_create.c | 3 +++
tools/libxl/libxl_dom.c | 3 ---
2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index f5771da..278b9ed 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -1484,6 +1484,9 @@ static void domcreate_complete(libxl__egc *egc,
libxl_domain_config *const d_config = dcs->guest_config;
libxl_domain_config *d_config_saved = &dcs->guest_config_saved;

+ libxl__file_reference_unmap(&dcs->build_state.pv_kernel);
+ libxl__file_reference_unmap(&dcs->build_state.pv_ramdisk);
+
if (!rc && d_config->b_info.exec_ssidref)
rc = xc_flask_relabel_domain(CTX->xch, dcs->guest_domid, d_config->b_info.exec_ssidref);

diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 8019f4e..2da3ac4 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -750,9 +750,6 @@ int libxl__build_pv(libxl__gc *gc, uint32_t domid,
state->store_mfn = xc_dom_p2m_host(dom, dom->xenstore_pfn);
}

- libxl__file_reference_unmap(&state->pv_kernel);
- libxl__file_reference_unmap(&state->pv_ramdisk);
-
ret = 0;
out:
xc_dom_release(dom);
--
1.7.10.4
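
The xsa160 fix is a standard leak repair: move resource release to the single completion path every exit funnels through, instead of the success path only. A reduced sketch of that shape (all names hypothetical, malloc/free standing in for the file-reference mmap/munmap):

#include <stdlib.h>
#include <stdio.h>

struct build_state { void *kernel_map; void *ramdisk_map; };

/* Hypothetical stand-in for libxl__file_reference_unmap(): releasing a
 * NULL reference is a no-op, so calling it is safe on every path. */
static void file_reference_unmap(void **map)
{
    free(*map);
    *map = NULL;
}

/* Mirrors the idea of domcreate_complete(): success or failure, every
 * domain-creation exit passes through here, so the unmaps cannot be
 * skipped by an early error return elsewhere. */
static void domcreate_complete(struct build_state *bs, int rc)
{
    file_reference_unmap(&bs->kernel_map);
    file_reference_unmap(&bs->ramdisk_map);
    printf("domain creation finished, rc=%d\n", rc);
}

int main(void)
{
    struct build_state bs = { malloc(64), malloc(64) };
    domcreate_complete(&bs, -3);   /* a failing build no longer leaks */
    return 0;
}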

main/xen/xsa164.patch (new file, 34 lines)
@@ -0,0 +1,34 @@
MSI-X: avoid array overrun upon MSI-X table writes

pt_msix_init() allocates msix->msix_entry[] to just cover
msix->total_entries entries. While pci_msix_readl() resorts to reading
physical memory for out of bounds reads, pci_msix_writel() so far
simply accessed/corrupted unrelated memory.

pt_iomem_map()'s call to cpu_register_physical_memory() registers a
page granular region, which is necessary as the Pending Bit Array may
share space with the MSI-X table (but nothing else is allowed to). This
also explains why pci_msix_readl() actually honors out of bounds reads,
but pci_msi_writel() doesn't need to.

This is XSA-164.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>

--- a/hw/pt-msi.c
+++ b/hw/pt-msi.c
@@ -440,6 +440,13 @@ static void pci_msix_writel(void *opaque
return;
}

+ if ( addr - msix->mmio_base_addr >= msix->total_entries * 16 )
+ {
+ PT_LOG("Error: Out of bounds write to MSI-X table,"
+ " addr %016"PRIx64"\n", addr);
+ return;
+ }
+
entry_nr = (addr - msix->mmio_base_addr) / 16;
entry = &msix->msix_entry[entry_nr];
offset = ((addr - msix->mmio_base_addr) % 16) / 4;
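
The added check is offset-before-index validation: reject any byte offset at or beyond total_entries * 16 (each MSI-X table entry is 16 bytes) before computing entry_nr. A reduced sketch with borrowed field names but a hypothetical structure:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical reduction of the device-model MSI-X state. */
struct msix {
    uint64_t mmio_base_addr;
    unsigned int total_entries;
};

/* Offsets past the last 16-byte entry (for instance the PBA sharing the
 * same page) must never be used to index msix->msix_entry[]. */
static int msix_table_write_in_bounds(const struct msix *msix, uint64_t addr)
{
    return addr - msix->mmio_base_addr < (uint64_t)msix->total_entries * 16;
}

int main(void)
{
    struct msix m = { 0xfebf0000, 2 };      /* 2 entries -> 32 valid bytes */
    printf("%d\n", msix_table_write_in_bounds(&m, 0xfebf0000 + 16)); /* 1 */
    printf("%d\n", msix_table_write_in_bounds(&m, 0xfebf0000 + 40)); /* 0 */
    return 0;
}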

main/xen/xsa165.patch (new file, 85 lines)
@@ -0,0 +1,85 @@
x86: don't leak ST(n)/XMMn values to domains first using them

FNINIT doesn't alter these registers, and hence using it is
insufficient to initialize a guest's initial state.

This is XSA-165.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -851,6 +851,17 @@ int arch_set_info_guest(
if ( v->arch.xsave_area )
v->arch.xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
}
+ else if ( v->arch.xsave_area )
+ memset(&v->arch.xsave_area->xsave_hdr, 0,
+ sizeof(v->arch.xsave_area->xsave_hdr));
+ else
+ {
+ typeof(v->arch.xsave_area->fpu_sse) *fpu_sse = v->arch.fpu_ctxt;
+
+ memset(fpu_sse, 0, sizeof(*fpu_sse));
+ fpu_sse->fcw = FCW_DEFAULT;
+ fpu_sse->mxcsr = MXCSR_DEFAULT;
+ }

if ( !compat )
{
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -17,19 +17,6 @@
#include <asm/xstate.h>
#include <asm/asm_defns.h>

-static void fpu_init(void)
-{
- unsigned long val;
-
- asm volatile ( "fninit" );
- if ( cpu_has_xmm )
- {
- /* load default value into MXCSR control/status register */
- val = MXCSR_DEFAULT;
- asm volatile ( "ldmxcsr %0" : : "m" (val) );
- }
-}
-
/*******************************/
/* FPU Restore Functions */
/*******************************/
@@ -248,15 +235,8 @@ void vcpu_restore_fpu_lazy(struct vcpu *

if ( cpu_has_xsave )
fpu_xrstor(v, XSTATE_LAZY);
- else if ( v->fpu_initialised )
- {
- if ( cpu_has_fxsr )
- fpu_fxrstor(v);
- else
- fpu_frstor(v);
- }
else
- fpu_init();
+ fpu_fxrstor(v);

v->fpu_initialised = 1;
v->fpu_dirtied = 1;
@@ -313,7 +293,14 @@ int vcpu_init_fpu(struct vcpu *v)
else
{
v->arch.fpu_ctxt = _xzalloc(sizeof(v->arch.xsave_area->fpu_sse), 16);
- if ( !v->arch.fpu_ctxt )
+ if ( v->arch.fpu_ctxt )
+ {
+ typeof(v->arch.xsave_area->fpu_sse) *fpu_sse = v->arch.fpu_ctxt;
+
+ fpu_sse->fcw = FCW_DEFAULT;
+ fpu_sse->mxcsr = MXCSR_DEFAULT;
+ }
+ else
rc = -ENOMEM;
}
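
The xsa165 fix boils down to: a fresh FPU save area must be zeroed (so no stale ST(n)/XMMn contents from a previous user leak through) and given the architectural defaults, since FNINIT alone leaves those registers untouched. In Xen, FCW_DEFAULT is 0x037f and MXCSR_DEFAULT is 0x1f80. A compilable sketch with an abbreviated stand-in layout (not the real 512-byte fxsave structure):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Abbreviated stand-in for the fxsave region Xen calls 'fpu_sse';
 * the real layout has more fields and different offsets. */
struct fxsave_stub {
    uint16_t fcw;
    uint16_t fsw;
    uint32_t mxcsr;
    uint8_t  st_xmm[504];   /* stale guest data would otherwise live here */
};

#define FCW_DEFAULT   0x037f   /* x87 control word at reset */
#define MXCSR_DEFAULT 0x1f80   /* SSE control/status at reset */

static void fpu_area_init(struct fxsave_stub *fpu_sse)
{
    memset(fpu_sse, 0, sizeof(*fpu_sse));   /* no previous vcpu's values */
    fpu_sse->fcw = FCW_DEFAULT;
    fpu_sse->mxcsr = MXCSR_DEFAULT;
}

int main(void)
{
    struct fxsave_stub f;
    fpu_area_init(&f);
    printf("fcw=%#x mxcsr=%#x\n", f.fcw, f.mxcsr);
    return 0;
}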

main/xen/xsa167.patch (new file, 77 lines)
@@ -0,0 +1,77 @@
x86/mm: PV superpage handling lacks sanity checks

MMUEXT_{,UN}MARK_SUPER fail to check the input MFN for validity before
dereferencing pointers into the superpage frame table.

get_superpage() has a similar issue.

This is XSA-167.

Reported-by: Qinghao Tang <luodalongde@gmail.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2624,6 +2624,9 @@ int get_superpage(unsigned long mfn, str

ASSERT(opt_allow_superpage);

+ if ( !mfn_valid(mfn | (L1_PAGETABLE_ENTRIES - 1)) )
+ return -EINVAL;
+
spage = mfn_to_spage(mfn);
y = spage->type_info;
do {
@@ -3401,42 +3404,26 @@ long do_mmuext_op(
}

case MMUEXT_MARK_SUPER:
+ case MMUEXT_UNMARK_SUPER:
{
unsigned long mfn = op.arg1.mfn;

- if ( unlikely(d != pg_owner) )
- rc = -EPERM;
- else if ( mfn & (L1_PAGETABLE_ENTRIES-1) )
- {
- MEM_LOG("Unaligned superpage reference mfn %lx", mfn);
- okay = 0;
- }
- else if ( !opt_allow_superpage )
+ if ( !opt_allow_superpage )
{
MEM_LOG("Superpages disallowed");
rc = -ENOSYS;
}
- else
- rc = mark_superpage(mfn_to_spage(mfn), d);
- break;
- }
-
- case MMUEXT_UNMARK_SUPER:
- {
- unsigned long mfn = op.arg1.mfn;
-
- if ( unlikely(d != pg_owner) )
+ else if ( unlikely(d != pg_owner) )
rc = -EPERM;
- else if ( mfn & (L1_PAGETABLE_ENTRIES-1) )
+ else if ( mfn & (L1_PAGETABLE_ENTRIES - 1) )
{
MEM_LOG("Unaligned superpage reference mfn %lx", mfn);
- okay = 0;
- }
- else if ( !opt_allow_superpage )
- {
- MEM_LOG("Superpages disallowed");
- rc = -ENOSYS;
+ rc = -EINVAL;
}
+ else if ( !mfn_valid(mfn | (L1_PAGETABLE_ENTRIES - 1)) )
+ rc = -EINVAL;
+ else if ( op.cmd == MMUEXT_MARK_SUPER )
+ rc = mark_superpage(mfn_to_spage(mfn), d);
else
rc = unmark_superpage(mfn_to_spage(mfn));
break;

main/xen/xsa168.patch (new file, 27 lines)
@@ -0,0 +1,27 @@
x86/VMX: prevent INVVPID failure due to non-canonical guest address

While INVLPG (and on SVM INVLPGA) don't fault on non-canonical
addresses, INVVPID fails (in the "individual address" case) when passed
such an address.

Since such intercepted INVLPG are effectively no-ops anyway, don't fix
this in vmx_invlpg_intercept(), but instead have paging_invlpg() never
return true in such a case.

This is XSA-168.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>

--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -245,7 +245,7 @@ paging_fault(unsigned long va, struct cp
* or 0 if it's safe not to do so. */
static inline int paging_invlpg(struct vcpu *v, unsigned long va)
{
- return paging_get_hostmode(v)->invlpg(v, va);
+ return is_canonical_address(va) && paging_get_hostmode(v)->invlpg(v, va);
}

/* Translate a guest virtual address to the frame number that the
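
is_canonical_address(), which the one-line fix relies on, holds when bits 63..47 of a 48-bit virtual address are all copies of bit 47. One common way to test it is a sign-extension round trip; a standalone sketch (equivalent in effect to, though not literally, Xen's macro):

#include <stdint.h>
#include <stdio.h>

#define VADDR_BITS 48

/* Canonical iff sign-extending from bit 47 is a no-op (arithmetic right
 * shift of negative values assumed, as the hypervisor sources assume). */
static int is_canonical_address(uint64_t va)
{
    return (uint64_t)((int64_t)(va << (64 - VADDR_BITS)) >>
                      (64 - VADDR_BITS)) == va;
}

int main(void)
{
    printf("%d\n", is_canonical_address(0x00007fffffffffffULL)); /* 1 */
    printf("%d\n", is_canonical_address(0xffff800000000000ULL)); /* 1 */
    printf("%d\n", is_canonical_address(0x0000800000000000ULL)); /* 0 */
    return 0;
}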

main/xen/xsa169.patch (new file, 33 lines)
@@ -0,0 +1,33 @@
x86: make debug output consistent in hvm_set_callback_via

The unconditional printks in the switch statement of the
hvm_set_callback_via function results in Xen log spam in non debug
versions of Xen. The printks are for debug output only so conditionally
compile the entire switch statement on debug versions of Xen only.

This is XSA-169.

Signed-off-by: Malcolm Crossley <malcolm.crossley@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>

--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -386,7 +386,8 @@ void hvm_set_callback_via(struct domain

spin_unlock(&d->arch.hvm_domain.irq_lock);

- dprintk(XENLOG_G_INFO, "Dom%u callback via changed to ", d->domain_id);
+#ifndef NDEBUG
+ printk(XENLOG_G_INFO "Dom%u callback via changed to ", d->domain_id);
switch ( via_type )
{
case HVMIRQ_callback_gsi:
@@ -402,6 +403,7 @@ void hvm_set_callback_via(struct domain
printk("None\n");
break;
}
+#endif
}

struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)

main/xen/xsa170.patch (new file, 79 lines)
@@ -0,0 +1,79 @@
x86/VMX: sanitize rIP before re-entering guest

... to prevent guest user mode arranging for a guest crash (due to
failed VM entry). (On the AMD system I checked, hardware is doing
exactly the canonicalization being added here.)

Note that fixing this in an architecturally correct way would be quite
a bit more involved: Making the x86 instruction emulator check all
branch targets for validity, plus dealing with invalid rIP resulting
from update_guest_eip() or incoming directly during a VM exit. The only
way to get the latter right would be by not having hardware do the
injection.

Note further that there are a two early returns from
vmx_vmexit_handler(): One (through vmx_failed_vmentry()) leads to
domain_crash() anyway, and the other covers real mode only and can
neither occur with a non-canonical rIP nor result in an altered rIP,
so we don't need to force those paths through the checking logic.

This is XSA-170.

Reported-by: 刘令 <liuling-it@360.cn>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2968,7 +2968,7 @@ static int vmx_handle_apic_write(void)
void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
unsigned long exit_qualification, exit_reason, idtv_info, intr_info = 0;
- unsigned int vector = 0;
+ unsigned int vector = 0, mode;
struct vcpu *v = current;

__vmread(GUEST_RIP, &regs->rip);
@@ -3566,6 +3566,41 @@ void vmx_vmexit_handler(struct cpu_user_
out:
if ( nestedhvm_vcpu_in_guestmode(v) )
nvmx_idtv_handling();
+
+ /*
+ * VM entry will fail (causing the guest to get crashed) if rIP (and
+ * rFLAGS, but we don't have an issue there) doesn't meet certain
+ * criteria. As we must not allow less than fully privileged mode to have
+ * such an effect on the domain, we correct rIP in that case (accepting
+ * this not being architecturally correct behavior, as the injected #GP
+ * fault will then not see the correct [invalid] return address).
+ * And since we know the guest will crash, we crash it right away if it
+ * already is in most privileged mode.
+ */
+ mode = vmx_guest_x86_mode(v);
+ if ( mode == 8 ? !is_canonical_address(regs->rip)
+ : regs->rip != regs->_eip )
+ {
+ struct segment_register ss;
+
+ gprintk(XENLOG_WARNING, "Bad rIP %lx for mode %u\n", regs->rip, mode);
+
+ vmx_get_segment_register(v, x86_seg_ss, &ss);
+ if ( ss.attr.fields.dpl )
+ {
+ __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+ if ( !(intr_info & INTR_INFO_VALID_MASK) )
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ /* Need to fix rIP nevertheless. */
+ if ( mode == 8 )
+ regs->rip = (long)(regs->rip << (64 - VADDR_BITS)) >>
+ (64 - VADDR_BITS);
+ else
+ regs->rip = regs->_eip;
+ }
+ else
+ domain_crash(v->domain);
+ }
}

void vmx_vmenter_helper(const struct cpu_user_regs *regs)
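
The rIP fix-up in the final hunk is that same sign extension run as a repair: overwrite bits 63..48 with copies of bit 47, producing the canonical address the injected #GP will then observe. Worked standalone with an illustrative input value (relying on arithmetic right shift of negative values, as the hypervisor code does):

#include <stdint.h>
#include <stdio.h>

#define VADDR_BITS 48

/* Same expression as the patch:
 * regs->rip = (long)(regs->rip << (64 - VADDR_BITS)) >> (64 - VADDR_BITS); */
static uint64_t canonicalize_rip(uint64_t rip)
{
    return (uint64_t)((int64_t)(rip << (64 - VADDR_BITS)) >> (64 - VADDR_BITS));
}

int main(void)
{
    uint64_t bad = 0x0100800000001234ULL;   /* illustrative non-canonical rIP */
    printf("%#018llx -> %#018llx\n", (unsigned long long)bad,
           (unsigned long long)canonicalize_rip(bad));
    /* prints 0x0100800000001234 -> 0xffff800000001234 */
    return 0;
}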