author     Mario Preksavec <mario@slackware.hr>              2018-03-08 10:55:49 +0100
committer  Willy Sudiarto Raharjo <willysr@slackbuilds.org>  2018-03-10 07:11:11 +0700
commit     451ae8400d2e5e365da89a52c166f86f80fb3a61 (patch)
tree       6ae9517b523bb1e7899b43c32deb8224a8cf6bb0 /system/xen/xsa
parent     ec5f9bb5d508ee64fc1b442a53f21ee7280ed746 (diff)
system/xen: Updated for version 4.10.0.
Signed-off-by: Mario Preksavec <mario@slackware.hr>
Diffstat (limited to 'system/xen/xsa')
-rw-r--r--  system/xen/xsa/xsa246-4.9.patch  74
-rw-r--r--  system/xen/xsa/xsa247-4.9-0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch  176
-rw-r--r--  system/xen/xsa/xsa247-4.9-0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch  109
-rw-r--r--  system/xen/xsa/xsa248.patch  164
-rw-r--r--  system/xen/xsa/xsa249.patch  42
-rw-r--r--  system/xen/xsa/xsa250.patch  67
-rw-r--r--  system/xen/xsa/xsa251.patch  21
-rw-r--r--  system/xen/xsa/xsa252.patch  27
-rw-r--r--  system/xen/xsa/xsa253.patch  26
-rw-r--r--  system/xen/xsa/xsa255-1.patch  133
-rw-r--r--  system/xen/xsa/xsa255-2.patch  167
-rw-r--r--  system/xen/xsa/xsa256.patch  40
12 files changed, 393 insertions, 653 deletions
diff --git a/system/xen/xsa/xsa246-4.9.patch b/system/xen/xsa/xsa246-4.9.patch
deleted file mode 100644
index 6370a106254c8..0000000000000
--- a/system/xen/xsa/xsa246-4.9.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From: Julien Grall <julien.grall@linaro.org>
-Subject: x86/pod: prevent infinite loop when shattering large pages
-
-When populating pages, the PoD may need to split large ones using
-p2m_set_entry and request the caller to retry (see ept_get_entry for
-instance).
-
-p2m_set_entry may fail to shatter if it is not possible to allocate
-memory for the new page table. However, the error is not propagated,
-resulting in the callers retrying the PoD operation indefinitely.
-
-Prevent the infinite loop by returning false when it is not possible to
-shatter the large mapping.
-
-This is XSA-246.
-
-Signed-off-by: Julien Grall <julien.grall@linaro.org>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-
---- a/xen/arch/x86/mm/p2m-pod.c
-+++ b/xen/arch/x86/mm/p2m-pod.c
-@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
- * NOTE: In a fine-grained p2m locking scenario this operation
- * may need to promote its locking from gfn->1g superpage
- */
-- p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
-- p2m_populate_on_demand, p2m->default_access);
-- return 0;
-+ return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
-+ p2m_populate_on_demand, p2m->default_access);
- }
-
- /* Only reclaim if we're in actual need of more cache. */
-@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai
-
- gfn_aligned = (gfn >> order) << order;
-
-- p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-- p2m->default_access);
-+ if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-+ p2m->default_access) )
-+ {
-+ p2m_pod_cache_add(p2m, p, order);
-+ goto out_fail;
-+ }
-
- for( i = 0; i < (1UL << order); i++ )
- {
-@@ -1150,13 +1153,18 @@ remap_and_retry:
- BUG_ON(order != PAGE_ORDER_2M);
- pod_unlock(p2m);
-
-- /* Remap this 2-meg region in singleton chunks */
-- /* NOTE: In a p2m fine-grained lock scenario this might
-- * need promoting the gfn lock from gfn->2M superpage */
-+ /*
-+ * Remap this 2-meg region in singleton chunks. See the comment on the
-+ * 1G page splitting path above for why a single call suffices.
-+ *
-+ * NOTE: In a p2m fine-grained lock scenario this might
-+ * need promoting the gfn lock from gfn->2M superpage.
-+ */
- gfn_aligned = (gfn>>order)<<order;
-- for(i=0; i<(1<<order); i++)
-- p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
-- p2m_populate_on_demand, p2m->default_access);
-+ if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
-+ p2m_populate_on_demand, p2m->default_access) )
-+ return -1;
-+
- if ( tb_init_done )
- {
- struct {
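
The XSA-246 change above boils down to propagating the result of p2m_set_entry()
instead of discarding it, so the caller's retry loop can terminate. The following
stand-alone C sketch illustrates only that pattern; the pool, set_entry() and
demand_populate() names are invented for the example and are not the Xen code.

/* Simplified illustration of the XSA-246 pattern: a helper that can fail
 * must pass its error back, otherwise the caller keeps retrying forever.
 * Every name below is invented for the example; this is not Xen code. */
#include <stdio.h>

static int pool;                    /* pretend page-table memory pool   */

static int set_entry(void)          /* stand-in for a fallible update   */
{
    if (pool == 0)
        return -1;                  /* out of memory: cannot shatter    */
    pool--;
    return 0;
}

/* Fixed behaviour: propagate the failure.  The vulnerable code instead
 * ignored the result and reported success, so callers retried forever. */
static int demand_populate(void)
{
    return set_entry();
}

int main(void)
{
    int attempts = 0;

    pool = 0;                       /* force the allocation failure     */
    while (demand_populate() == 0 && ++attempts < 1000)
        ;                           /* caller's retry loop              */
    printf("stopped after %d attempts\n", attempts);
    return 0;
}
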
diff --git a/system/xen/xsa/xsa247-4.9-0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch b/system/xen/xsa/xsa247-4.9-0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
deleted file mode 100644
index ad9524a304195..0000000000000
--- a/system/xen/xsa/xsa247-4.9-0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
+++ /dev/null
@@ -1,176 +0,0 @@
-From ad208b8b7e45fb2b7c572b86c61c26412609e82d Mon Sep 17 00:00:00 2001
-From: George Dunlap <george.dunlap@citrix.com>
-Date: Fri, 10 Nov 2017 16:53:54 +0000
-Subject: [PATCH 1/2] p2m: Always check to see if removing a p2m entry actually
- worked
-
-The PoD zero-check functions speculatively remove memory from the p2m,
-then check to see if it's completely zeroed, before putting it in the
-cache.
-
-Unfortunately, the p2m_set_entry() calls may fail if the underlying
-pagetable structure needs to change and the domain has exhausted its
-p2m memory pool: for instance, if we're removing a 2MiB region out of
-a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
-region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
-case); and the return value is not checked.
-
-The underlying mfn will then be added into the PoD cache, and at some
-point mapped into another location in the p2m. If the guest
-afterwards balloons out this memory, it will be freed to the hypervisor
-and potentially reused by another domain, in spite of the fact that
-the original domain still has writable mappings to it.
-
-There are several places where p2m_set_entry() shouldn't be able to
-fail, as it is guaranteed to write an entry of the same order that
-succeeded before. Add a backstop of crashing the domain just in case,
-and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
-builds.
-
-While we're here, use PAGE_ORDER_2M rather than a magic constant.
-
-This is part of XSA-247.
-
-Reported-by: George Dunlap <george.dunlap@citrix.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
-v4:
-- Removed some trailing whitespace
-v3:
-- Reformat reset clause to be more compact
-- Make sure to set map[i] = NULL when unmapping in case we need to bail
-v2:
-- Crash a domain if a p2m_set_entry we think cannot fail fails anyway.
----
- xen/arch/x86/mm/p2m-pod.c | 77 +++++++++++++++++++++++++++++++++++++----------
- 1 file changed, 61 insertions(+), 16 deletions(-)
-
-diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
-index 730a48f928..f2ed751892 100644
---- a/xen/arch/x86/mm/p2m-pod.c
-+++ b/xen/arch/x86/mm/p2m-pod.c
-@@ -752,8 +752,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
- }
-
- /* Try to remove the page, restoring old mapping if it fails. */
-- p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
-- p2m_populate_on_demand, p2m->default_access);
-+ if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
-+ p2m_populate_on_demand, p2m->default_access) )
-+ goto out;
-+
- p2m_tlb_flush_sync(p2m);
-
- /* Make none of the MFNs are used elsewhere... for example, mapped
-@@ -810,9 +812,18 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
- ret = SUPERPAGE_PAGES;
-
- out_reset:
-- if ( reset )
-- p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
--
-+ /*
-+ * This p2m_set_entry() call shouldn't be able to fail, since the same order
-+ * on the same gfn succeeded above. If that turns out to be false, crashing
-+ * the domain should be the safest way of making sure we don't leak memory.
-+ */
-+ if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
-+ type0, p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ }
-+
- out:
- gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
- return ret;
-@@ -869,19 +880,30 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
- }
-
- /* Try to remove the page, restoring old mapping if it fails. */
-- p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
-- p2m_populate_on_demand, p2m->default_access);
-+ if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
-+ p2m_populate_on_demand, p2m->default_access) )
-+ goto skip;
-
- /* See if the page was successfully unmapped. (Allow one refcount
- * for being allocated to a domain.) */
- if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
- {
-+ /*
-+ * If the previous p2m_set_entry call succeeded, this one shouldn't
-+ * be able to fail. If it does, crashing the domain should be safe.
-+ */
-+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-+ types[i], p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ goto out_unmap;
-+ }
-+
-+ skip:
- unmap_domain_page(map[i]);
- map[i] = NULL;
-
-- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-- types[i], p2m->default_access);
--
- continue;
- }
- }
-@@ -900,12 +922,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
-
- unmap_domain_page(map[i]);
-
-- /* See comment in p2m_pod_zero_check_superpage() re gnttab
-- * check timing. */
-- if ( j < PAGE_SIZE/sizeof(*map[i]) )
-+ map[i] = NULL;
-+
-+ /*
-+ * See comment in p2m_pod_zero_check_superpage() re gnttab
-+ * check timing.
-+ */
-+ if ( j < (PAGE_SIZE / sizeof(*map[i])) )
- {
-- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-- types[i], p2m->default_access);
-+ /*
-+ * If the previous p2m_set_entry call succeeded, this one shouldn't
-+ * be able to fail. If it does, crashing the domain should be safe.
-+ */
-+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-+ types[i], p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ goto out_unmap;
-+ }
- }
- else
- {
-@@ -929,7 +964,17 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
- p2m->pod.entry_count++;
- }
- }
--
-+
-+ return;
-+
-+out_unmap:
-+ /*
-+ * Something went wrong, probably crashing the domain. Unmap
-+ * everything and return.
-+ */
-+ for ( i = 0; i < count; i++ )
-+ if ( map[i] )
-+ unmap_domain_page(map[i]);
- }
-
- #define POD_SWEEP_LIMIT 1024
---
-2.15.0
-
diff --git a/system/xen/xsa/xsa247-4.9-0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch b/system/xen/xsa/xsa247-4.9-0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
deleted file mode 100644
index 8c850bd7f55d1..0000000000000
--- a/system/xen/xsa/xsa247-4.9-0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
+++ /dev/null
@@ -1,109 +0,0 @@
-From d4bc7833707351a5341a6bdf04c752a028d9560d Mon Sep 17 00:00:00 2001
-From: George Dunlap <george.dunlap@citrix.com>
-Date: Fri, 10 Nov 2017 16:53:55 +0000
-Subject: [PATCH 2/2] p2m: Check return value of p2m_set_entry() when
- decreasing reservation
-
-If the entire range specified to p2m_pod_decrease_reservation() is marked
-populate-on-demand, then it will make a single p2m_set_entry() call,
-reducing its PoD entry count.
-
-Unfortunately, in the right circumstances, this p2m_set_entry() call
-may fail. In that case, repeated calls to decrease_reservation() may
-cause p2m->pod.entry_count to fall below zero, potentially tripping
-over BUG_ON()s to the contrary.
-
-Instead, check to see if the entry succeeded, and return false if not.
-The caller will then call guest_remove_page() on the gfns, which will
-return -EINVAL upon finding no valid memory there to return.
-
-Unfortunately if the order > 0, the entry may have partially changed.
-A domain_crash() is probably the safest thing in that case.
-
-Other p2m_set_entry() calls in the same function should be fine,
-because they are writing the entry at its current order. Nonetheless,
-check the return value and crash if our assumption turns otu to be
-wrong.
-
-This is part of XSA-247.
-
-Reported-by: George Dunlap <george.dunlap@citrix.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
-v2: Crash the domain if we're not sure it's safe (or if we think it
-can't happen)
----
- xen/arch/x86/mm/p2m-pod.c | 42 +++++++++++++++++++++++++++++++++---------
- 1 file changed, 33 insertions(+), 9 deletions(-)
-
-diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
-index f2ed751892..473d6a6dbf 100644
---- a/xen/arch/x86/mm/p2m-pod.c
-+++ b/xen/arch/x86/mm/p2m-pod.c
-@@ -555,11 +555,23 @@ p2m_pod_decrease_reservation(struct domain *d,
-
- if ( !nonpod )
- {
-- /* All PoD: Mark the whole region invalid and tell caller
-- * we're done. */
-- p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
-- p2m->default_access);
-- p2m->pod.entry_count-=(1<<order);
-+ /*
-+ * All PoD: Mark the whole region invalid and tell caller
-+ * we're done.
-+ */
-+ if ( p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
-+ p2m->default_access) )
-+ {
-+ /*
-+ * If this fails, we can't tell how much of the range was changed.
-+ * Best to crash the domain unless we're sure a partial change is
-+ * impossible.
-+ */
-+ if ( order != 0 )
-+ domain_crash(d);
-+ goto out_unlock;
-+ }
-+ p2m->pod.entry_count -= 1UL << order;
- BUG_ON(p2m->pod.entry_count < 0);
- ret = 1;
- goto out_entry_check;
-@@ -600,8 +612,14 @@ p2m_pod_decrease_reservation(struct domain *d,
- n = 1UL << cur_order;
- if ( t == p2m_populate_on_demand )
- {
-- p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
-- p2m_invalid, p2m->default_access);
-+ /* This shouldn't be able to fail */
-+ if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
-+ p2m_invalid, p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ goto out_unlock;
-+ }
- p2m->pod.entry_count -= n;
- BUG_ON(p2m->pod.entry_count < 0);
- pod -= n;
-@@ -622,8 +640,14 @@ p2m_pod_decrease_reservation(struct domain *d,
-
- page = mfn_to_page(mfn);
-
-- p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
-- p2m_invalid, p2m->default_access);
-+ /* This shouldn't be able to fail */
-+ if ( p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
-+ p2m_invalid, p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ goto out_unlock;
-+ }
- p2m_tlb_flush_sync(p2m);
- for ( j = 0; j < n; ++j )
- set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
---
-2.15.0
-
diff --git a/system/xen/xsa/xsa248.patch b/system/xen/xsa/xsa248.patch
deleted file mode 100644
index 966c16e043aae..0000000000000
--- a/system/xen/xsa/xsa248.patch
+++ /dev/null
@@ -1,164 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/mm: don't wrongly set page ownership
-
-PV domains can obtain mappings of any pages owned by the correct domain,
-including ones that aren't actually assigned as "normal" RAM, but used
-by Xen internally. At the moment such "internal" pages marked as owned
-by a guest include pages used to track logdirty bits, as well as p2m
-pages and the "unpaged pagetable" for HVM guests. Since the PV memory
-management and shadow code conflict in their use of struct page_info
-fields, and since shadow code is being used for log-dirty handling for
-PV domains, pages coming from the shadow pool must, for PV domains, not
-have the domain set as their owner.
-
-While the change could be done conditionally for just the PV case in
-shadow code, do it unconditionally (and for consistency also for HAP),
-just to be on the safe side.
-
-There's one special case though for shadow code: The page table used for
-running a HVM guest in unpaged mode is subject to get_page() (in
-set_shadow_status()) and hence must have its owner set.
-
-This is XSA-248.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
----
-v2: Drop PGC_page_table related pieces.
-
---- a/xen/arch/x86/mm/hap/hap.c
-+++ b/xen/arch/x86/mm/hap/hap.c
-@@ -286,8 +286,7 @@ static struct page_info *hap_alloc_p2m_p
- {
- d->arch.paging.hap.total_pages--;
- d->arch.paging.hap.p2m_pages++;
-- page_set_owner(pg, d);
-- pg->count_info |= 1;
-+ ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
- }
- else if ( !d->arch.paging.p2m_alloc_failed )
- {
-@@ -302,21 +301,23 @@ static struct page_info *hap_alloc_p2m_p
-
- static void hap_free_p2m_page(struct domain *d, struct page_info *pg)
- {
-+ struct domain *owner = page_get_owner(pg);
-+
- /* This is called both from the p2m code (which never holds the
- * paging lock) and the log-dirty code (which always does). */
- paging_lock_recursive(d);
-
-- ASSERT(page_get_owner(pg) == d);
-- /* Should have just the one ref we gave it in alloc_p2m_page() */
-- if ( (pg->count_info & PGC_count_mask) != 1 ) {
-- HAP_ERROR("Odd p2m page %p count c=%#lx t=%"PRtype_info"\n",
-- pg, pg->count_info, pg->u.inuse.type_info);
-+ /* Should still have no owner and count zero. */
-+ if ( owner || (pg->count_info & PGC_count_mask) )
-+ {
-+ HAP_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
-+ d->domain_id, mfn_x(page_to_mfn(pg)),
-+ owner ? owner->domain_id : DOMID_INVALID,
-+ pg->count_info, pg->u.inuse.type_info);
- WARN();
-+ pg->count_info &= ~PGC_count_mask;
-+ page_set_owner(pg, NULL);
- }
-- pg->count_info &= ~PGC_count_mask;
-- /* Free should not decrement domain's total allocation, since
-- * these pages were allocated without an owner. */
-- page_set_owner(pg, NULL);
- d->arch.paging.hap.p2m_pages--;
- d->arch.paging.hap.total_pages++;
- hap_free(d, page_to_mfn(pg));
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -1503,32 +1503,29 @@ shadow_alloc_p2m_page(struct domain *d)
- pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
- d->arch.paging.shadow.p2m_pages++;
- d->arch.paging.shadow.total_pages--;
-+ ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
-
- paging_unlock(d);
-
-- /* Unlike shadow pages, mark p2m pages as owned by the domain.
-- * Marking the domain as the owner would normally allow the guest to
-- * create mappings of these pages, but these p2m pages will never be
-- * in the domain's guest-physical address space, and so that is not
-- * believed to be a concern. */
-- page_set_owner(pg, d);
-- pg->count_info |= 1;
- return pg;
- }
-
- static void
- shadow_free_p2m_page(struct domain *d, struct page_info *pg)
- {
-- ASSERT(page_get_owner(pg) == d);
-- /* Should have just the one ref we gave it in alloc_p2m_page() */
-- if ( (pg->count_info & PGC_count_mask) != 1 )
-+ struct domain *owner = page_get_owner(pg);
-+
-+ /* Should still have no owner and count zero. */
-+ if ( owner || (pg->count_info & PGC_count_mask) )
- {
-- SHADOW_ERROR("Odd p2m page count c=%#lx t=%"PRtype_info"\n",
-+ SHADOW_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
-+ d->domain_id, mfn_x(page_to_mfn(pg)),
-+ owner ? owner->domain_id : DOMID_INVALID,
- pg->count_info, pg->u.inuse.type_info);
-+ pg->count_info &= ~PGC_count_mask;
-+ page_set_owner(pg, NULL);
- }
-- pg->count_info &= ~PGC_count_mask;
- pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
-- page_set_owner(pg, NULL);
-
- /* This is called both from the p2m code (which never holds the
- * paging lock) and the log-dirty code (which always does). */
-@@ -3132,7 +3129,9 @@ int shadow_enable(struct domain *d, u32
- e = __map_domain_page(pg);
- write_32bit_pse_identmap(e);
- unmap_domain_page(e);
-+ pg->count_info = 1;
- pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
-+ page_set_owner(pg, d);
- }
-
- paging_lock(d);
-@@ -3170,7 +3169,11 @@ int shadow_enable(struct domain *d, u32
- if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
- p2m_teardown(p2m);
- if ( rv != 0 && pg != NULL )
-+ {
-+ pg->count_info &= ~PGC_count_mask;
-+ page_set_owner(pg, NULL);
- shadow_free_p2m_page(d, pg);
-+ }
- domain_unpause(d);
- return rv;
- }
-@@ -3279,7 +3282,22 @@ out:
-
- /* Must be called outside the lock */
- if ( unpaged_pagetable )
-+ {
-+ if ( page_get_owner(unpaged_pagetable) == d &&
-+ (unpaged_pagetable->count_info & PGC_count_mask) == 1 )
-+ {
-+ unpaged_pagetable->count_info &= ~PGC_count_mask;
-+ page_set_owner(unpaged_pagetable, NULL);
-+ }
-+ /* Complain here in cases where shadow_free_p2m_page() won't. */
-+ else if ( !page_get_owner(unpaged_pagetable) &&
-+ !(unpaged_pagetable->count_info & PGC_count_mask) )
-+ SHADOW_ERROR("d%d: Odd unpaged pt %"PRI_mfn" c=%lx t=%"PRtype_info"\n",
-+ d->domain_id, mfn_x(page_to_mfn(unpaged_pagetable)),
-+ unpaged_pagetable->count_info,
-+ unpaged_pagetable->u.inuse.type_info);
- shadow_free_p2m_page(d, unpaged_pagetable);
-+ }
- }
-
- void shadow_final_teardown(struct domain *d)
diff --git a/system/xen/xsa/xsa249.patch b/system/xen/xsa/xsa249.patch
deleted file mode 100644
index ecfa4305e5bfb..0000000000000
--- a/system/xen/xsa/xsa249.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/shadow: fix refcount overflow check
-
-Commit c385d27079 ("x86 shadow: for multi-page shadows, explicitly track
-the first page") reduced the refcount width to 25, without adjusting the
-overflow check. Eliminate the disconnect by using a manifest constant.
-
-Interestingly, up to commit 047782fa01 ("Out-of-sync L1 shadows: OOS
-snapshot") the refcount was 27 bits wide, yet the check was already
-using 26.
-
-This is XSA-249.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
----
-v2: Simplify expression back to the style it was.
-
---- a/xen/arch/x86/mm/shadow/private.h
-+++ b/xen/arch/x86/mm/shadow/private.h
-@@ -529,7 +529,7 @@ static inline int sh_get_ref(struct doma
- x = sp->u.sh.count;
- nx = x + 1;
-
-- if ( unlikely(nx >= 1U<<26) )
-+ if ( unlikely(nx >= (1U << PAGE_SH_REFCOUNT_WIDTH)) )
- {
- SHADOW_PRINTK("shadow ref overflow, gmfn=%lx smfn=%lx\n",
- __backpointer(sp), mfn_x(smfn));
---- a/xen/include/asm-x86/mm.h
-+++ b/xen/include/asm-x86/mm.h
-@@ -82,7 +82,8 @@ struct page_info
- unsigned long type:5; /* What kind of shadow is this? */
- unsigned long pinned:1; /* Is the shadow pinned? */
- unsigned long head:1; /* Is this the first page of the shadow? */
-- unsigned long count:25; /* Reference count */
-+#define PAGE_SH_REFCOUNT_WIDTH 25
-+ unsigned long count:PAGE_SH_REFCOUNT_WIDTH; /* Reference count */
- } sh;
-
- /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
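
The XSA-249 fix above keeps the refcount bitfield width and the overflow check in
sync by deriving both from one manifest constant (PAGE_SH_REFCOUNT_WIDTH). Below is
a small illustrative C sketch of the same idea; the struct layout and names are
simplified assumptions, not the Xen definitions.

/* Illustrative sketch of the XSA-249 fix: derive the overflow check from
 * the same constant that defines the bitfield width, so the two cannot
 * drift apart.  Names are made up for the example, not Xen's. */
#include <stdio.h>

#define REFCOUNT_WIDTH 25

struct shadow_page {
    unsigned long type:5;
    unsigned long pinned:1;
    unsigned long head:1;
    unsigned long count:REFCOUNT_WIDTH;   /* reference count */
};

static int get_ref(struct shadow_page *sp)
{
    unsigned long nx = sp->count + 1;

    if (nx >= (1UL << REFCOUNT_WIDTH))    /* was a hard-coded 1U << 26 */
        return 0;                         /* overflow: refuse the ref  */
    sp->count = nx;
    return 1;
}

int main(void)
{
    struct shadow_page sp = { .count = (1UL << REFCOUNT_WIDTH) - 1 };

    printf("ref taken: %d\n", get_ref(&sp));  /* prints 0: would overflow */
    return 0;
}
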
diff --git a/system/xen/xsa/xsa250.patch b/system/xen/xsa/xsa250.patch
deleted file mode 100644
index 26aeb33fedaf7..0000000000000
--- a/system/xen/xsa/xsa250.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/shadow: fix ref-counting error handling
-
-The old-Linux handling in shadow_set_l4e() mistakenly ORed together the
-results of sh_get_ref() and sh_pin(). As the latter failing is not a
-correctness problem, simply ignore its return value.
-
-In sh_set_toplevel_shadow() a failing sh_get_ref() must not be
-accompanied by installing the entry, despite the domain being crashed.
-
-This is XSA-250.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
-
---- a/xen/arch/x86/mm/shadow/multi.c
-+++ b/xen/arch/x86/mm/shadow/multi.c
-@@ -923,7 +923,7 @@ static int shadow_set_l4e(struct domain
- shadow_l4e_t new_sl4e,
- mfn_t sl4mfn)
- {
-- int flags = 0, ok;
-+ int flags = 0;
- shadow_l4e_t old_sl4e;
- paddr_t paddr;
- ASSERT(sl4e != NULL);
-@@ -938,15 +938,16 @@ static int shadow_set_l4e(struct domain
- {
- /* About to install a new reference */
- mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
-- ok = sh_get_ref(d, sl3mfn, paddr);
-- /* Are we pinning l3 shadows to handle wierd linux behaviour? */
-- if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
-- ok |= sh_pin(d, sl3mfn);
-- if ( !ok )
-+
-+ if ( !sh_get_ref(d, sl3mfn, paddr) )
- {
- domain_crash(d);
- return SHADOW_SET_ERROR;
- }
-+
-+ /* Are we pinning l3 shadows to handle weird Linux behaviour? */
-+ if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
-+ sh_pin(d, sl3mfn);
- }
-
- /* Write the new entry */
-@@ -3965,14 +3966,15 @@ sh_set_toplevel_shadow(struct vcpu *v,
-
- /* Take a ref to this page: it will be released in sh_detach_old_tables()
- * or the next call to set_toplevel_shadow() */
-- if ( !sh_get_ref(d, smfn, 0) )
-+ if ( sh_get_ref(d, smfn, 0) )
-+ new_entry = pagetable_from_mfn(smfn);
-+ else
- {
- SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
- domain_crash(d);
-+ new_entry = pagetable_null();
- }
-
-- new_entry = pagetable_from_mfn(smfn);
--
- install_new_entry:
- /* Done. Install it */
- SHADOW_PRINTK("%u/%u [%u] gmfn %#"PRI_mfn" smfn %#"PRI_mfn"\n",
diff --git a/system/xen/xsa/xsa251.patch b/system/xen/xsa/xsa251.patch
deleted file mode 100644
index 582ef622eb1a2..0000000000000
--- a/system/xen/xsa/xsa251.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/paging: don't unconditionally BUG() on finding SHARED_M2P_ENTRY
-
-PV guests can fully control the values written into the P2M.
-
-This is XSA-251.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/mm/paging.c
-+++ b/xen/arch/x86/mm/paging.c
-@@ -274,7 +274,7 @@ void paging_mark_pfn_dirty(struct domain
- return;
-
- /* Shared MFNs should NEVER be marked dirty */
-- BUG_ON(SHARED_M2P(pfn_x(pfn)));
-+ BUG_ON(paging_mode_translate(d) && SHARED_M2P(pfn_x(pfn)));
-
- /*
- * Values with the MSB set denote MFNs that aren't really part of the
diff --git a/system/xen/xsa/xsa252.patch b/system/xen/xsa/xsa252.patch
new file mode 100644
index 0000000000000..8615928142a62
--- /dev/null
+++ b/system/xen/xsa/xsa252.patch
@@ -0,0 +1,27 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: memory: don't implicitly unpin for decrease-reservation
+
+It very likely was a mistake (copy-and-paste from domain cleanup code)
+to implicitly unpin here: The caller should really unpin itself before
+(or after, if they so wish) requesting the page to be removed.
+
+This is XSA-252.
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/common/memory.c
++++ b/xen/common/memory.c
+@@ -357,11 +357,6 @@ int guest_remove_page(struct domain *d,
+
+ rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
+
+-#ifdef _PGT_pinned
+- if ( !rc && test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
+- put_page_and_type(page);
+-#endif
+-
+ /*
+ * With the lack of an IOMMU on some platforms, domains with DMA-capable
+ * device must retrieve the same pfn when the hypercall populate_physmap
diff --git a/system/xen/xsa/xsa253.patch b/system/xen/xsa/xsa253.patch
new file mode 100644
index 0000000000000..19e4269358522
--- /dev/null
+++ b/system/xen/xsa/xsa253.patch
@@ -0,0 +1,26 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/msr: Free msr_vcpu_policy during vcpu destruction
+
+c/s 4187f79dc7 "x86/msr: introduce struct msr_vcpu_policy" introduced a
+per-vcpu memory allocation, but failed to free it in the clean vcpu
+destruction case.
+
+This is XSA-253
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
+index b17468c..0ae715d 100644
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -382,6 +382,9 @@ void vcpu_destroy(struct vcpu *v)
+
+ vcpu_destroy_fpu(v);
+
++ xfree(v->arch.msr);
++ v->arch.msr = NULL;
++
+ if ( !is_idle_domain(v->domain) )
+ vpmu_destroy(v);
+
diff --git a/system/xen/xsa/xsa255-1.patch b/system/xen/xsa/xsa255-1.patch
new file mode 100644
index 0000000000000..f8bba9e516c2f
--- /dev/null
+++ b/system/xen/xsa/xsa255-1.patch
@@ -0,0 +1,133 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: gnttab/ARM: don't corrupt shared GFN array
+
+... by writing status GFNs to it. Introduce a second array instead.
+Also implement gnttab_status_gmfn() properly now that the information is
+suitably being tracked.
+
+While touching it anyway, remove a misguided (but luckily benign) upper
+bound check from gnttab_shared_gmfn(): We should never access beyond the
+bounds of that array.
+
+This is part of XSA-255.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+---
+v3: Don't init the ARM GFN arrays to zero anymore, use INVALID_GFN.
+v2: New.
+
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -3775,6 +3775,7 @@ int gnttab_map_frame(struct domain *d, u
+ {
+ int rc = 0;
+ struct grant_table *gt = d->grant_table;
++ bool status = false;
+
+ grant_write_lock(gt);
+
+@@ -3785,6 +3786,7 @@ int gnttab_map_frame(struct domain *d, u
+ (idx & XENMAPIDX_grant_table_status) )
+ {
+ idx &= ~XENMAPIDX_grant_table_status;
++ status = true;
+ if ( idx < nr_status_frames(gt) )
+ *mfn = _mfn(virt_to_mfn(gt->status[idx]));
+ else
+@@ -3802,7 +3804,7 @@ int gnttab_map_frame(struct domain *d, u
+ }
+
+ if ( !rc )
+- gnttab_set_frame_gfn(gt, idx, gfn);
++ gnttab_set_frame_gfn(gt, status, idx, gfn);
+
+ grant_write_unlock(gt);
+
+--- a/xen/include/asm-arm/grant_table.h
++++ b/xen/include/asm-arm/grant_table.h
+@@ -9,7 +9,8 @@
+ #define INITIAL_NR_GRANT_FRAMES 1U
+
+ struct grant_table_arch {
+- gfn_t *gfn;
++ gfn_t *shared_gfn;
++ gfn_t *status_gfn;
+ };
+
+ void gnttab_clear_flag(unsigned long nr, uint16_t *addr);
+@@ -21,7 +22,6 @@ int replace_grant_host_mapping(unsigned
+ unsigned long new_gpaddr, unsigned int flags);
+ void gnttab_mark_dirty(struct domain *d, unsigned long l);
+ #define gnttab_create_status_page(d, t, i) do {} while (0)
+-#define gnttab_status_gmfn(d, t, i) (0)
+ #define gnttab_release_host_mappings(domain) 1
+ static inline int replace_grant_supported(void)
+ {
+@@ -42,19 +42,35 @@ static inline unsigned int gnttab_dom0_m
+
+ #define gnttab_init_arch(gt) \
+ ({ \
+- (gt)->arch.gfn = xzalloc_array(gfn_t, (gt)->max_grant_frames); \
+- ( (gt)->arch.gfn ? 0 : -ENOMEM ); \
++ unsigned int ngf_ = (gt)->max_grant_frames; \
++ unsigned int nsf_ = grant_to_status_frames(ngf_); \
++ \
++ (gt)->arch.shared_gfn = xmalloc_array(gfn_t, ngf_); \
++ (gt)->arch.status_gfn = xmalloc_array(gfn_t, nsf_); \
++ if ( (gt)->arch.shared_gfn && (gt)->arch.status_gfn ) \
++ { \
++ while ( ngf_-- ) \
++ (gt)->arch.shared_gfn[ngf_] = INVALID_GFN; \
++ while ( nsf_-- ) \
++ (gt)->arch.status_gfn[nsf_] = INVALID_GFN; \
++ } \
++ else \
++ gnttab_destroy_arch(gt); \
++ (gt)->arch.shared_gfn ? 0 : -ENOMEM; \
+ })
+
+ #define gnttab_destroy_arch(gt) \
+ do { \
+- xfree((gt)->arch.gfn); \
+- (gt)->arch.gfn = NULL; \
++ xfree((gt)->arch.shared_gfn); \
++ (gt)->arch.shared_gfn = NULL; \
++ xfree((gt)->arch.status_gfn); \
++ (gt)->arch.status_gfn = NULL; \
+ } while ( 0 )
+
+-#define gnttab_set_frame_gfn(gt, idx, gfn) \
++#define gnttab_set_frame_gfn(gt, st, idx, gfn) \
+ do { \
+- (gt)->arch.gfn[idx] = gfn; \
++ ((st) ? (gt)->arch.status_gfn : (gt)->arch.shared_gfn)[idx] = \
++ (gfn); \
+ } while ( 0 )
+
+ #define gnttab_create_shared_page(d, t, i) \
+@@ -65,8 +81,10 @@ static inline unsigned int gnttab_dom0_m
+ } while ( 0 )
+
+ #define gnttab_shared_gmfn(d, t, i) \
+- ( ((i >= nr_grant_frames(t)) && \
+- (i < (t)->max_grant_frames))? 0 : gfn_x((t)->arch.gfn[i]))
++ gfn_x(((i) >= nr_grant_frames(t)) ? INVALID_GFN : (t)->arch.shared_gfn[i])
++
++#define gnttab_status_gmfn(d, t, i) \
++ gfn_x(((i) >= nr_status_frames(t)) ? INVALID_GFN : (t)->arch.status_gfn[i])
+
+ #define gnttab_need_iommu_mapping(d) \
+ (is_domain_direct_mapped(d) && need_iommu(d))
+--- a/xen/include/asm-x86/grant_table.h
++++ b/xen/include/asm-x86/grant_table.h
+@@ -46,7 +46,7 @@ static inline unsigned int gnttab_dom0_m
+
+ #define gnttab_init_arch(gt) 0
+ #define gnttab_destroy_arch(gt) do {} while ( 0 )
+-#define gnttab_set_frame_gfn(gt, idx, gfn) do {} while ( 0 )
++#define gnttab_set_frame_gfn(gt, st, idx, gfn) do {} while ( 0 )
+
+ #define gnttab_create_shared_page(d, t, i) \
+ do { \
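
The ARM part of XSA-255 above splits the single GFN array into separate shared and
status arrays, pre-fills every slot with INVALID_GFN, and tears both down if either
allocation fails. The sketch below mirrors that initialisation pattern with made-up
types and plain malloc()/free(); it is only an illustration of the approach, not the
real gnttab_init_arch() macro.

/* Rough sketch of the initialisation pattern xsa255-1 introduces: two
 * separate GFN arrays, every slot pre-set to an "invalid" sentinel, and
 * both freed again if either allocation fails.  Types and names are
 * simplified stand-ins, not the Xen definitions. */
#include <stdlib.h>

#define INVALID_GFN ((unsigned long)-1)

struct gt_arch {
    unsigned long *shared_gfn;
    unsigned long *status_gfn;
};

static void gt_arch_destroy(struct gt_arch *a)
{
    free(a->shared_gfn); a->shared_gfn = NULL;
    free(a->status_gfn); a->status_gfn = NULL;
}

static int gt_arch_init(struct gt_arch *a, size_t nshared, size_t nstatus)
{
    a->shared_gfn = malloc(nshared * sizeof(*a->shared_gfn));
    a->status_gfn = malloc(nstatus * sizeof(*a->status_gfn));

    if (!a->shared_gfn || !a->status_gfn) {
        gt_arch_destroy(a);     /* roll back the partial allocation */
        return -1;              /* -ENOMEM in the real code         */
    }

    /* Mark every slot as "no GFN recorded yet" rather than zero, so a
     * never-mapped frame is never confused with GFN 0. */
    for (size_t i = 0; i < nshared; i++)
        a->shared_gfn[i] = INVALID_GFN;
    for (size_t i = 0; i < nstatus; i++)
        a->status_gfn[i] = INVALID_GFN;
    return 0;
}

int main(void)
{
    struct gt_arch a = { 0 };

    if (gt_arch_init(&a, 4, 1) == 0)
        gt_arch_destroy(&a);
    return 0;
}
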
diff --git a/system/xen/xsa/xsa255-2.patch b/system/xen/xsa/xsa255-2.patch
new file mode 100644
index 0000000000000..402b6efe98c26
--- /dev/null
+++ b/system/xen/xsa/xsa255-2.patch
@@ -0,0 +1,167 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: gnttab: don't blindly free status pages upon version change
+
+There may still be active mappings, which would trigger the respective
+BUG_ON(). Split the loop into one dealing with the page attributes and
+the second (when the first fully passed) freeing the pages. Return an
+error if any pages still have pending references.
+
+This is part of XSA-255.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+---
+v4: Add gprintk(XENLOG_ERR, ...) to domain_crash() invocations.
+v3: Call guest_physmap_remove_page() from gnttab_map_frame(), making the
+ code unconditional at the same time. Re-base over changes to first
+ patch.
+v2: Also deal with translated guests.
+
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -1636,23 +1636,74 @@ status_alloc_failed:
+ return -ENOMEM;
+ }
+
+-static void
++static int
+ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
+ {
+- int i;
++ unsigned int i;
+
+ for ( i = 0; i < nr_status_frames(gt); i++ )
+ {
+ struct page_info *pg = virt_to_page(gt->status[i]);
++ gfn_t gfn = gnttab_get_frame_gfn(gt, true, i);
++
++ /*
++ * For translated domains, recovering from failure after partial
++ * changes were made is more complicated than it seems worth
++ * implementing at this time. Hence respective error paths below
++ * crash the domain in such a case.
++ */
++ if ( paging_mode_translate(d) )
++ {
++ int rc = gfn_eq(gfn, INVALID_GFN)
++ ? 0
++ : guest_physmap_remove_page(d, gfn,
++ _mfn(page_to_mfn(pg)), 0);
++
++ if ( rc )
++ {
++ gprintk(XENLOG_ERR,
++ "Could not remove status frame %u (GFN %#lx) from P2M\n",
++ i, gfn_x(gfn));
++ domain_crash(d);
++ return rc;
++ }
++ gnttab_set_frame_gfn(gt, true, i, INVALID_GFN);
++ }
+
+ BUG_ON(page_get_owner(pg) != d);
+ if ( test_and_clear_bit(_PGC_allocated, &pg->count_info) )
+ put_page(pg);
+- BUG_ON(pg->count_info & ~PGC_xen_heap);
++
++ if ( pg->count_info & ~PGC_xen_heap )
++ {
++ if ( paging_mode_translate(d) )
++ {
++ gprintk(XENLOG_ERR,
++ "Wrong page state %#lx of status frame %u (GFN %#lx)\n",
++ pg->count_info, i, gfn_x(gfn));
++ domain_crash(d);
++ }
++ else
++ {
++ if ( get_page(pg, d) )
++ set_bit(_PGC_allocated, &pg->count_info);
++ while ( i-- )
++ gnttab_create_status_page(d, gt, i);
++ }
++ return -EBUSY;
++ }
++
++ page_set_owner(pg, NULL);
++ }
++
++ for ( i = 0; i < nr_status_frames(gt); i++ )
++ {
+ free_xenheap_page(gt->status[i]);
+ gt->status[i] = NULL;
+ }
+ gt->nr_status_frames = 0;
++
++ return 0;
+ }
+
+ /*
+@@ -2962,8 +3013,9 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARA
+ break;
+ }
+
+- if ( op.version < 2 && gt->gt_version == 2 )
+- gnttab_unpopulate_status_frames(currd, gt);
++ if ( op.version < 2 && gt->gt_version == 2 &&
++ (res = gnttab_unpopulate_status_frames(currd, gt)) != 0 )
++ goto out_unlock;
+
+ /* Make sure there's no crud left over from the old version. */
+ for ( i = 0; i < nr_grant_frames(gt); i++ )
+@@ -3803,6 +3855,11 @@ int gnttab_map_frame(struct domain *d, u
+ rc = -EINVAL;
+ }
+
++ if ( !rc && paging_mode_translate(d) &&
++ !gfn_eq(gnttab_get_frame_gfn(gt, status, idx), INVALID_GFN) )
++ rc = guest_physmap_remove_page(d, gnttab_get_frame_gfn(gt, status, idx),
++ *mfn, 0);
++
+ if ( !rc )
+ gnttab_set_frame_gfn(gt, status, idx, gfn);
+
+--- a/xen/include/asm-arm/grant_table.h
++++ b/xen/include/asm-arm/grant_table.h
+@@ -73,6 +73,11 @@ static inline unsigned int gnttab_dom0_m
+ (gfn); \
+ } while ( 0 )
+
++#define gnttab_get_frame_gfn(gt, st, idx) ({ \
++ _gfn((st) ? gnttab_status_gmfn(NULL, gt, idx) \
++ : gnttab_shared_gmfn(NULL, gt, idx)); \
++})
++
+ #define gnttab_create_shared_page(d, t, i) \
+ do { \
+ share_xen_page_with_guest( \
+--- a/xen/include/asm-x86/grant_table.h
++++ b/xen/include/asm-x86/grant_table.h
+@@ -47,6 +47,12 @@ static inline unsigned int gnttab_dom0_m
+ #define gnttab_init_arch(gt) 0
+ #define gnttab_destroy_arch(gt) do {} while ( 0 )
+ #define gnttab_set_frame_gfn(gt, st, idx, gfn) do {} while ( 0 )
++#define gnttab_get_frame_gfn(gt, st, idx) ({ \
++ unsigned long mfn_ = (st) ? gnttab_status_mfn(gt, idx) \
++ : gnttab_shared_mfn(gt, idx); \
++ unsigned long gpfn_ = get_gpfn_from_mfn(mfn_); \
++ VALID_M2P(gpfn_) ? _gfn(gpfn_) : INVALID_GFN; \
++})
+
+ #define gnttab_create_shared_page(d, t, i) \
+ do { \
+@@ -63,11 +69,11 @@ static inline unsigned int gnttab_dom0_m
+ } while ( 0 )
+
+
+-#define gnttab_shared_mfn(d, t, i) \
++#define gnttab_shared_mfn(t, i) \
+ ((virt_to_maddr((t)->shared_raw[i]) >> PAGE_SHIFT))
+
+ #define gnttab_shared_gmfn(d, t, i) \
+- (mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
++ (mfn_to_gmfn(d, gnttab_shared_mfn(t, i)))
+
+
+ #define gnttab_status_mfn(t, i) \
diff --git a/system/xen/xsa/xsa256.patch b/system/xen/xsa/xsa256.patch
new file mode 100644
index 0000000000000..50ff24e17bd32
--- /dev/null
+++ b/system/xen/xsa/xsa256.patch
@@ -0,0 +1,40 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/hvm: Disallow the creation of HVM domains without Local APIC emulation
+
+There are multiple problems, not necessarily limited to:
+
+ * Guests which configure event channels via hvmop_set_evtchn_upcall_vector(),
+ or which hit %cr8 emulation will cause Xen to fall over a NULL vlapic->regs
+ pointer.
+
+ * On Intel hardware, disabling the TPR_SHADOW execution control without
+ reenabling CR8_{LOAD,STORE} interception means that the guests %cr8
+ accesses interact with the real TPR. Amongst other things, setting the
+ real TPR to 0xf blocks even IPIs from interrupting this CPU.
+
+ * On hardware which sets up the use of Interrupt Posting, including
+ IOMMU-Posting, guests run without the appropriate non-root configuration,
+ which at a minimum will result in dropped interrupts.
+
+Whether no-LAPIC mode is of any use at all remains to be seen.
+
+This is XSA-256.
+
+Reported-by: Ian Jackson <ian.jackson@eu.citrix.com>
+Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
+index f93327b..f65fc12 100644
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -413,7 +413,7 @@ static bool emulation_flags_ok(const struct domain *d, uint32_t emflags)
+ if ( is_hardware_domain(d) &&
+ emflags != (XEN_X86_EMU_LAPIC|XEN_X86_EMU_IOAPIC) )
+ return false;
+- if ( !is_hardware_domain(d) && emflags &&
++ if ( !is_hardware_domain(d) &&
+ emflags != XEN_X86_EMU_ALL && emflags != XEN_X86_EMU_LAPIC )
+ return false;
+ }