Diffstat (limited to 'system/xen/xsa/xsa347-4.13-1.patch')
-rw-r--r-- | system/xen/xsa/xsa347-4.13-1.patch | 149 |
1 file changed, 0 insertions, 149 deletions
diff --git a/system/xen/xsa/xsa347-4.13-1.patch b/system/xen/xsa/xsa347-4.13-1.patch
deleted file mode 100644
index e9f31a151f016..0000000000000
--- a/system/xen/xsa/xsa347-4.13-1.patch
+++ /dev/null
@@ -1,149 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: AMD/IOMMU: convert amd_iommu_pte from struct to union
-
-This is to add a "raw" counterpart to the bitfield equivalent. Take the
-opportunity and
- - convert fields to bool / unsigned int,
- - drop the naming of the reserved field,
- - shorten the names of the ignored ones.
-
-This is part of XSA-347.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Paul Durrant <paul@xen.org>
-
---- a/xen/drivers/passthrough/amd/iommu_map.c
-+++ b/xen/drivers/passthrough/amd/iommu_map.c
-@@ -38,7 +38,7 @@ static unsigned int pfn_to_pde_idx(unsig
- static unsigned int clear_iommu_pte_present(unsigned long l1_mfn,
-                                             unsigned long dfn)
- {
--    struct amd_iommu_pte *table, *pte;
-+    union amd_iommu_pte *table, *pte;
-     unsigned int flush_flags;
-
-     table = map_domain_page(_mfn(l1_mfn));
-@@ -52,7 +52,7 @@ static unsigned int clear_iommu_pte_pres
-     return flush_flags;
- }
-
--static unsigned int set_iommu_pde_present(struct amd_iommu_pte *pte,
-+static unsigned int set_iommu_pde_present(union amd_iommu_pte *pte,
-                                           unsigned long next_mfn,
-                                           unsigned int next_level, bool iw,
-                                           bool ir)
-@@ -87,7 +87,7 @@ static unsigned int set_iommu_pte_presen
-                                           int pde_level,
-                                           bool iw, bool ir)
- {
--    struct amd_iommu_pte *table, *pde;
-+    union amd_iommu_pte *table, *pde;
-     unsigned int flush_flags;
-
-     table = map_domain_page(_mfn(pt_mfn));
-@@ -178,7 +178,7 @@ void iommu_dte_set_guest_cr3(struct amd_
- static int iommu_pde_from_dfn(struct domain *d, unsigned long dfn,
-                               unsigned long pt_mfn[], bool map)
- {
--    struct amd_iommu_pte *pde, *next_table_vaddr;
-+    union amd_iommu_pte *pde, *next_table_vaddr;
-     unsigned long next_table_mfn;
-     unsigned int level;
-     struct page_info *table;
-@@ -458,7 +458,7 @@ int __init amd_iommu_quarantine_init(str
-     unsigned long end_gfn =
-         1ul << (DEFAULT_DOMAIN_ADDRESS_WIDTH - PAGE_SHIFT);
-     unsigned int level = amd_iommu_get_paging_mode(end_gfn);
--    struct amd_iommu_pte *table;
-+    union amd_iommu_pte *table;
-
-     if ( hd->arch.root_table )
-     {
-@@ -489,7 +489,7 @@ int __init amd_iommu_quarantine_init(str
-
-         for ( i = 0; i < PTE_PER_TABLE_SIZE; i++ )
-         {
--            struct amd_iommu_pte *pde = &table[i];
-+            union amd_iommu_pte *pde = &table[i];
-
-             /*
-              * PDEs are essentially a subset of PTEs, so this function
---- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
-+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
-@@ -390,7 +390,7 @@ static void deallocate_next_page_table(s
-
- static void deallocate_page_table(struct page_info *pg)
- {
--    struct amd_iommu_pte *table_vaddr;
-+    union amd_iommu_pte *table_vaddr;
-     unsigned int index, level = PFN_ORDER(pg);
-
-     PFN_ORDER(pg) = 0;
-@@ -405,7 +405,7 @@ static void deallocate_page_table(struct
-
-     for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
-     {
--        struct amd_iommu_pte *pde = &table_vaddr[index];
-+        union amd_iommu_pte *pde = &table_vaddr[index];
-
-         if ( pde->mfn && pde->next_level && pde->pr )
-         {
-@@ -557,7 +557,7 @@ static void amd_dump_p2m_table_level(str
-                                      paddr_t gpa, int indent)
- {
-     paddr_t address;
--    struct amd_iommu_pte *table_vaddr;
-+    const union amd_iommu_pte *table_vaddr;
-     int index;
-
-     if ( level < 1 )
-@@ -573,7 +573,7 @@ static void amd_dump_p2m_table_level(str
-
-     for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
-     {
--        struct amd_iommu_pte *pde = &table_vaddr[index];
-+        const union amd_iommu_pte *pde = &table_vaddr[index];
-
-         if ( !(index % 2) )
-             process_pending_softirqs();
---- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
-+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
-@@ -465,20 +465,23 @@ union amd_iommu_x2apic_control {
- #define IOMMU_PAGE_TABLE_U32_PER_ENTRY (IOMMU_PAGE_TABLE_ENTRY_SIZE / 4)
- #define IOMMU_PAGE_TABLE_ALIGNMENT 4096
-
--struct amd_iommu_pte {
--    uint64_t pr:1;
--    uint64_t ignored0:4;
--    uint64_t a:1;
--    uint64_t d:1;
--    uint64_t ignored1:2;
--    uint64_t next_level:3;
--    uint64_t mfn:40;
--    uint64_t reserved:7;
--    uint64_t u:1;
--    uint64_t fc:1;
--    uint64_t ir:1;
--    uint64_t iw:1;
--    uint64_t ignored2:1;
-+union amd_iommu_pte {
-+    uint64_t raw;
-+    struct {
-+        bool pr:1;
-+        unsigned int ign0:4;
-+        bool a:1;
-+        bool d:1;
-+        unsigned int ign1:2;
-+        unsigned int next_level:3;
-+        uint64_t mfn:40;
-+        unsigned int :7;
-+        bool u:1;
-+        bool fc:1;
-+        bool ir:1;
-+        bool iw:1;
-+        unsigned int ign2:1;
-+    };
- };
-
- /* Paging modes */
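Editor's note: the deleted patch above only changes the type declaration, adding a "raw" view alongside the bitfields. Below is a minimal standalone sketch of what that pattern enables: assembling a new page-table entry locally and publishing it with a single 64-bit store, presumably so a concurrently walking IOMMU never sees a half-updated entry. The union layout mirrors the hunk above, but pte_set() is a hypothetical helper written for illustration, not code from the Xen tree, and like Xen it relies on GCC-style packing of mixed-type bitfields.

/*
 * Standalone sketch (not the Xen sources) of the union-with-raw pattern.
 * Field widths mirror the AMD IOMMU PTE layout from the patch above;
 * pte_set() is a hypothetical helper for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>

union amd_iommu_pte {
    uint64_t raw;                    /* whole entry as one 64-bit value */
    struct {
        bool pr:1;                   /* present */
        unsigned int ign0:4;
        bool a:1;                    /* accessed */
        bool d:1;                    /* dirty */
        unsigned int ign1:2;
        unsigned int next_level:3;   /* 0 for a leaf entry */
        uint64_t mfn:40;             /* machine frame number */
        unsigned int :7;             /* reserved */
        bool u:1;
        bool fc:1;
        bool ir:1;                   /* IOMMU read permission */
        bool iw:1;                   /* IOMMU write permission */
        unsigned int ign2:1;
    };
};

/*
 * Build the new entry in a local variable, then publish it with a single
 * store through ->raw.  Updating the live bitfields one at a time would
 * let the IOMMU observe intermediate states of the entry.
 */
static void pte_set(volatile union amd_iommu_pte *pte, uint64_t mfn,
                    unsigned int next_level, bool iw, bool ir)
{
    union amd_iommu_pte new_pte = { .raw = 0 };

    new_pte.pr = true;
    new_pte.next_level = next_level;
    new_pte.mfn = mfn;
    new_pte.iw = iw;
    new_pte.ir = ir;

    pte->raw = new_pte.raw;          /* one 64-bit write */
}

With only the bitfield struct, the compiler is free to emit several read-modify-write accesses against the live table entry; the raw member is what makes the assemble-then-publish idiom expressible at all.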