main/xen: upgrade to 4.21.1

This commit is contained in:
omni 2026-03-26 16:24:31 +00:00 committed by Natanael Copa
parent e7fb0ce85f
commit dc6365af87
5 changed files with 3 additions and 300 deletions

View File

@ -1,8 +1,8 @@
# Contributor: Roger Pau Monne <roger.pau@entel.upc.edu>
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=xen
pkgver=4.21.0
pkgrel=3
pkgver=4.21.1
pkgrel=0
pkgdesc="Xen hypervisor"
url="https://www.xenproject.org/"
arch="x86_64 armv7 aarch64"
@ -432,11 +432,6 @@ source="https://downloads.xenproject.org/release/xen/$pkgver/xen-$pkgver.tar.gz
hotplug-Linux-iscsi-block-handle-lun-1.patch
xsa477.patch
xsa479.patch
xsa480.patch
xsa481.patch
xenstored.initd
xenstored.confd
xenconsoled.initd
@ -672,14 +667,10 @@ qemu_openrc() {
}
sha512sums="
9a89578ce62c8adc43bb60bb59dfbfb7c2e5b8ec71ee8e104547bbb61bf5e95df302e48f52956114d00bf2354667382e3c494fce8e7134383a6b6f98e7abb219 xen-4.21.0.tar.gz
88a961f0203374e6f00d04185d7fb4803dbdaa529a46e5dcdeda87743147e1974c717cefc5e8b11e129e7056358c879ea8248293b0913578d16be32aeafb5646 xen-4.21.1.tar.gz
27a39198aa75bb42825f67ed2c76a2baf65ffd95b52259a3d36863d010a4608ac7f39e07887ffdaab35df1982c36a1c7fd5b8c7d974fb5ebab52aff897e1e6b3 qemu-xen_paths.patch
1c9cb24bf67a2e84466572198315d5501627addf1ccd55d8d83df8d77d269a6696cd45e4a55601495168284e3bff58fb39853f56c46aaddd14f6191821678cf6 hotplug-vif-vtrill.patch
8c9cfc6afca325df1d8026e21ed03fa8cd2c7e1a21a56cc1968301c5ab634bfe849951899e75d328951d7a41273d1e49a2448edbadec0029ed410c43c0549812 hotplug-Linux-iscsi-block-handle-lun-1.patch
3458e804fe201bdfb662e7a2c37348623574020d52d84b3f29b24aea882669720c28b0f8ea7a0a57961311ae86c0f96bc8e65bd4789ca2324436277fed1bb4a2 xsa477.patch
5178c09a5c72aecc1d19cf612e7d7933db87e0b50646fa53b2351995327bf8b4673bed23e87aee3b12ed78edf5d38f982f37f7a8e86bf0d6d07da8530051132b xsa479.patch
20b5b5ba0c12578a5ba9c8f970eb97e3587c7051b5a663f67611009f1581f0d07f26b0cd8580be6dd212cf2020b4b95bc9f69bb44e610447cc45f05ea8399bd0 xsa480.patch
f0a6c1db34b18efcb93694b878b8810b889181710d0dac299e598353e5274b8aed52af9e3338f0f5b832228f09e26440ef2c49687100d3090ad758bc0a94aae9 xsa481.patch
9430940692d6bfb58b1438e0f5f84cb703fbca9ce9cc157a1313ab1ceff63222a1ae31c991543b20c8fc84300df2b22f4614b27bbff32f82e17f27fcd953143c xenstored.initd
093f7fbd43faf0a16a226486a0776bade5dc1681d281c5946a3191c32d74f9699c6bf5d0ab8de9d1195a2461165d1660788e92a3156c9b3c7054d7b2d52d7ff0 xenstored.confd
1dd04f4bf1890771aa7eef0b6e46f7139487da0907d28dcdbef9fbe335dcf731ca391cfcb175dd82924f637a308de00a69ae981f67348c34f04489ec5e5dc3b7 xenconsoled.initd

View File

@ -1,105 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/shadow: don't overrun trace_emul_write_val
Guests can do wider-than-PTE-size writes on page tables. The tracing
helper variable, however, only offers space for a single PTE (and it is
being switched to the more correct type right here). Therefore bound
incoming write sizes to the amount of space available.
To not leave dead code (which is a Misra concern), drop the now unused
guest_pa_t as well.
Also move and adjust GUEST_PTE_SIZE: Derive it rather than using hard-
coded numbers, and put it in the sole source file where it's actually
needed. This then also addresses a Misra rule 20.9 ("All identifiers
used in the controlling expression of #if or #elif preprocessing
directives shall be #define'd before evaluation") violation:
GUEST_PAGING_LEVELS is #define'd only in multi.c.
This is XSA-477 / CVE-2025-58150.
Fixes: 9a86ac1aa3d2 ("xentrace 5/7: Additional tracing for the shadow code")
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1970,15 +1970,15 @@ static void sh_prefetch(struct vcpu *v,
#if GUEST_PAGING_LEVELS == 4
typedef u64 guest_va_t;
-typedef u64 guest_pa_t;
#elif GUEST_PAGING_LEVELS == 3
typedef u32 guest_va_t;
-typedef u64 guest_pa_t;
#else
typedef u32 guest_va_t;
-typedef u32 guest_pa_t;
#endif
+/* Size (in bytes) of a guest PTE */
+#define GUEST_PTE_SIZE sizeof(guest_l1e_t)
+
/* Shadow trace event with GUEST_PAGING_LEVELS folded into the event field. */
static void sh_trace(uint32_t event, unsigned int extra, const void *extra_data)
{
@@ -2048,11 +2048,14 @@ static void __maybe_unused sh_trace_gfn_
static DEFINE_PER_CPU(guest_va_t,trace_emulate_initial_va);
static DEFINE_PER_CPU(int,trace_extra_emulation_count);
#endif
-static DEFINE_PER_CPU(guest_pa_t,trace_emulate_write_val);
+static DEFINE_PER_CPU(guest_l1e_t, trace_emulate_write_val);
static void cf_check trace_emulate_write_val(
const void *ptr, unsigned long vaddr, const void *src, unsigned int bytes)
{
+ if ( bytes > sizeof(this_cpu(trace_emulate_write_val)) )
+ bytes = sizeof(this_cpu(trace_emulate_write_val));
+
#if GUEST_PAGING_LEVELS == 3
if ( vaddr == this_cpu(trace_emulate_initial_va) )
memcpy(&this_cpu(trace_emulate_write_val), src, bytes);
@@ -2077,13 +2080,16 @@ static inline void sh_trace_emulate(gues
/*
* For GUEST_PAGING_LEVELS=3 (PAE paging), guest_l1e is 64 while
* guest_va is 32. Put it first to avoid padding.
+ *
+ * Note: .write_val is an arbitrary set of written bytes, possibly
+ * misaligned and possibly spanning the next gl1e.
*/
guest_l1e_t gl1e, write_val;
guest_va_t va;
uint32_t flags:29, emulation_count:3;
} d = {
.gl1e = gl1e,
- .write_val.l1 = this_cpu(trace_emulate_write_val),
+ .write_val = this_cpu(trace_emulate_write_val),
.va = va,
#if GUEST_PAGING_LEVELS == 3
.emulation_count = this_cpu(trace_extra_emulation_count),
@@ -2672,7 +2677,7 @@ static int cf_check sh_page_fault(
paging_unlock(d);
put_gfn(d, gfn_x(gfn));
- this_cpu(trace_emulate_write_val) = 0;
+ this_cpu(trace_emulate_write_val) = (guest_l1e_t){};
#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
early_emulation:
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -120,14 +120,6 @@ enum {
TRCE_SFLAG_OOS_FIXUP_EVICT,
};
-
-/* Size (in bytes) of a guest PTE */
-#if GUEST_PAGING_LEVELS >= 3
-# define GUEST_PTE_SIZE 8
-#else
-# define GUEST_PTE_SIZE 4
-#endif
-
/******************************************************************************
* Auditing routines
*/

View File

@ -1,81 +0,0 @@
From: Roger Pau Monné <roger.pau@citrix.com>
Subject: x86/spec-ctrl: Fix incomplete IBPB flushing during context switch
The previous logic attempted to skip an IBPB in the case of vCPU returning to
a CPU on which it was the previous vCPU to run. While safe for Xen's
isolation between vCPUs, this prevents the guest kernel from correctly isolating
between tasks. Consider:
1) vCPU runs on CPU A, running task 1.
2) vCPU moves to CPU B, idle gets scheduled on A. Xen skips IBPB.
3) On CPU B, guest kernel switches from task 1 to 2, issuing IBPB.
4) vCPU moves back to CPU A. Xen skips IBPB again.
Now, task 2 is running on CPU A with task 1's training still in the BTB.
Do the flush unconditionally when switching to a vCPU different than the
idle one. Note there's no need to explicitly gate the IBPB to next domain
!= idle, as the context where the IBPB is issued is subject to that
condition already unless the pCPU is going offline, at which point we don't
really care to issue an extra IBPB.
Also add a comment with the reasoning why the IBPB needs to be in
context_switch() rather than __context_switch().
This is XSA-479 / CVE-2026-23553.
Fixes: a2ed643ed783 ("x86/ctxt: Issue a speculation barrier between vcpu contexts")
Reported-by: David Kaplan <david.kaplan@amd.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
xen/arch/x86/domain.c | 36 +++++++++---------------------------
1 file changed, 9 insertions(+), 27 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index c29a6b0decee..c1eded3eb604 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2174,33 +2174,15 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
ctxt_switch_levelling(next);
- if ( opt_ibpb_ctxt_switch && !is_idle_domain(nextd) )
- {
- static DEFINE_PER_CPU(unsigned int, last);
- unsigned int *last_id = &this_cpu(last);
-
- /*
- * Squash the domid and vcpu id together for comparison
- * efficiency. We could in principle stash and compare the struct
- * vcpu pointer, but this risks a false alias if a domain has died
- * and the same 4k page gets reused for a new vcpu.
- */
- unsigned int next_id = (((unsigned int)nextd->domain_id << 16) |
- (uint16_t)next->vcpu_id);
- BUILD_BUG_ON(MAX_VIRT_CPUS > 0xffff);
-
- /*
- * When scheduling from a vcpu, to idle, and back to the same vcpu
- * (which might be common in a lightly loaded system, or when
- * using vcpu pinning), there is no need to issue IBPB, as we are
- * returning to the same security context.
- */
- if ( *last_id != next_id )
- {
- spec_ctrl_new_guest_context();
- *last_id = next_id;
- }
- }
+ /*
+ * Issue an IBPB when scheduling a different vCPU if required.
+ *
+ * IBPB clears the RSB/RAS/RAP, but that's fine as we leave this
+ * function via reset_stack_and_call_ind() rather than via a RET
+ * instruction.
+ */
+ if ( opt_ibpb_ctxt_switch )
+ spec_ctrl_new_guest_context();
/* Update the top-of-stack block with the new speculation settings. */
info->scf =

View File

@ -1,46 +0,0 @@
From 45f6866e34b7e9ee8b6ac16d646a2e954c97e48e Mon Sep 17 00:00:00 2001
From: Roger Pau Monne <roger.pau@citrix.com>
Date: Tue, 17 Feb 2026 09:33:43 +0100
Subject: [PATCH] x86/p2m: issue a sync flush before freeing paging pages
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
In the EPT implementation, the deferred flushing logic is used
unconditionally, and that would lead to paging memory being returned to the
paging pool before its references had been flushed.
Issue any pending flushes before freeing the paging memory back to the
pool.
Note AMD (NPT) and Shadow paging are not affected, as they don't implement
the deferred flushing logic.
This is XSA-480 / CVE-2026-23554
Fixes: 4a59e6bb3a96 ("x86/EPT: squash meaningless TLB flush")
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
xen/arch/x86/mm/p2m.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e915da26a832..fddecdf978ec 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -479,6 +479,11 @@ void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
ASSERT(p2m->domain);
ASSERT(p2m->domain->arch.paging.free_page);
+ /*
+ * Issue any pending flush here, in case it was deferred before. The page
+ * will be returned to the paging pool now.
+ */
+ p2m_tlb_flush_sync(p2m);
page_list_del(pg, &p2m->pages);
p2m->domain->arch.paging.free_page(p2m->domain, pg);
--
2.51.0

View File

@ -1,56 +0,0 @@
From 0cff16f0a997f1b0871b621a1d6050652530e5d9 Mon Sep 17 00:00:00 2001
From: Juergen Gross <jgross@suse.com>
Date: Thu, 12 Feb 2026 08:29:38 +0100
Subject: [PATCH] tools/xenstored: fix canonicalize() error testing
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The setting of errno in canonicalize() is rather fragile and seems to
be even wrong in one corner case: when the invalid path "/local/domain/"
is passed, sscanf() will set errno to 0, resulting in canonicalize() to
return NULL with errno being 0. This can result in triggering the
assert(conn->in == NULL) in consider_message().
Don't assume the initial setting of errno to "EINVAL" will stay valid
in all cases and set it to EINVAL only when returning NULL due to an
invalid path.
This is XSA-481/CVE-2026-23555
Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Julien Grall <julien@xen.org>
---
tools/xenstored/core.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/tools/xenstored/core.c b/tools/xenstored/core.c
index 64c478a801..2e826f99eb 100644
--- a/tools/xenstored/core.c
+++ b/tools/xenstored/core.c
@@ -1240,11 +1240,10 @@ const char *canonicalize(struct connection *conn, const void *ctx,
* - illegal character in node
* - starts with '@' but no special node allowed
*/
- errno = EINVAL;
if (!node ||
!valid_chars(node) ||
(node[0] == '@' && !allow_special))
- return NULL;
+ goto inval;
if (node[0] != '/' && node[0] != '@') {
name = talloc_asprintf(ctx, "%s/%s", get_implicit_path(conn),
@@ -1272,6 +1271,8 @@ const char *canonicalize(struct connection *conn, const void *ctx,
if (name != node)
talloc_free(name);
+ inval:
+ errno = EINVAL;
return NULL;
}
--
2.53.0