| author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2016-12-15 03:13:05 +0300 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2016-12-22 16:01:04 +0100 |
| commit | 6c7c3c21f95dd9af8a0691c0dd29b07247984122 (patch) | |
| tree | ee8ef96cdff2c67cb89aa7217edc106a4bfc2be3 /target/i386/arch_memory_mapping.c | |
| parent | c52ab08aee6f7d4717fc6b517174043126bd302f (diff) | |
x86: implement la57 paging mode
The new paging mode is an extension of IA-32e mode with an additional page
table level.
It brings support for a 57-bit virtual address space (128 PB) and a 52-bit
physical address space (4 PB).
The structure of the new page table level is identical to that of the pml4.
The feature is enumerated with CPUID.(EAX=07H, ECX=0):ECX[bit 16].
CR4.LA57[bit 12] needs to be set when paging is enabled to activate 5-level
paging mode.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Message-Id: <20161215001305.146807-1-kirill.shutemov@linux.intel.com>
[Drop changes to target-i386/translate.c. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
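
To make the geometry of the new mode concrete, here is a small, hypothetical C sketch (not part of the patch): 4-level paging consumes linear-address bits 47:12 as four 9-bit table indices, and LA57 adds a fifth 9-bit index in bits 56:48, which is where the 57-bit virtual address space comes from. The `pt_index` helper and the sample address are made up for illustration.

```c
/*
 * Hypothetical helper (not part of the patch): split a linear address into
 * the per-level page-table indices used by 4-level and 5-level paging.
 * Assumes 4 KiB pages and 9-bit indices per level; the shift values match
 * the ones used by the walkers in the diff below.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned pt_index(uint64_t la, unsigned shift)
{
    return (la >> shift) & 0x1ff;   /* each table level has 512 entries */
}

int main(void)
{
    uint64_t la = 0x01abcdef12345000ULL;   /* sample 57-bit linear address */

    printf("PML5 index: %u\n", pt_index(la, 48));  /* new level, LA57 only */
    printf("PML4 index: %u\n", pt_index(la, 39));
    printf("PDPT index: %u\n", pt_index(la, 30));
    printf("PD   index: %u\n", pt_index(la, 21));
    printf("PT   index: %u\n", pt_index(la, 12));
    return 0;
}
```

Each level selects one of 512 entries, which is why the walkers in the diff below iterate over 512 slots and shift the index by 39 (pml4) or 48 (pml5) when building `line_addr`.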
Diffstat (limited to 'target/i386/arch_memory_mapping.c')
-rw-r--r-- | target/i386/arch_memory_mapping.c | 42 |
1 file changed, 37 insertions, 5 deletions
```diff
diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c
index 88f341e1bb..826aee597b 100644
--- a/target/i386/arch_memory_mapping.c
+++ b/target/i386/arch_memory_mapping.c
@@ -220,7 +220,8 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
 
 /* IA-32e Paging */
 static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
-                       hwaddr pml4e_start_addr, int32_t a20_mask)
+                       hwaddr pml4e_start_addr, int32_t a20_mask,
+                       target_ulong start_line_addr)
 {
     hwaddr pml4e_addr, pdpe_start_addr;
     uint64_t pml4e;
@@ -236,11 +237,34 @@ static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
             continue;
         }
 
-        line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
+        line_addr = start_line_addr | ((i & 0x1ffULL) << 39);
         pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
         walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
     }
 }
+
+static void walk_pml5e(MemoryMappingList *list, AddressSpace *as,
+                       hwaddr pml5e_start_addr, int32_t a20_mask)
+{
+    hwaddr pml5e_addr, pml4e_start_addr;
+    uint64_t pml5e;
+    target_ulong line_addr;
+    int i;
+
+    for (i = 0; i < 512; i++) {
+        pml5e_addr = (pml5e_start_addr + i * 8) & a20_mask;
+        pml5e = address_space_ldq(as, pml5e_addr, MEMTXATTRS_UNSPECIFIED,
+                                  NULL);
+        if (!(pml5e & PG_PRESENT_MASK)) {
+            /* not present */
+            continue;
+        }
+
+        line_addr = (0x7fULL << 57) | ((i & 0x1ffULL) << 48);
+        pml4e_start_addr = (pml5e & PLM4_ADDR_MASK) & a20_mask;
+        walk_pml4e(list, as, pml4e_start_addr, a20_mask, line_addr);
+    }
+}
 #endif
 
 void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
@@ -257,10 +281,18 @@ void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
     if (env->cr[4] & CR4_PAE_MASK) {
 #ifdef TARGET_X86_64
         if (env->hflags & HF_LMA_MASK) {
-            hwaddr pml4e_addr;
+            if (env->cr[4] & CR4_LA57_MASK) {
+                hwaddr pml5e_addr;
+
+                pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
+                walk_pml5e(list, cs->as, pml5e_addr, env->a20_mask);
+            } else {
+                hwaddr pml4e_addr;
 
-            pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
-            walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask);
+                pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
+                walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask,
+                           0xffffULL << 48);
+            }
         } else
 #endif
         {
```
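
The hard-coded masks in the walkers (`0xffffULL << 48` for the 4-level entry point, `0x7fULL << 57` inside `walk_pml5e`) are the sign-extension bits of an upper-half canonical address: every bit above the top implemented virtual-address bit must be a copy of that bit. Below is a minimal sketch of that rule, assuming 48 or 57 implemented bits; the `canonical` helper is illustrative and not part of QEMU.

```c
/*
 * Illustrative sketch (not from the patch): canonical sign extension for
 * 48-bit (4-level) and 57-bit (5-level) linear addresses.  For an
 * upper-half address this ORs in exactly the masks used in the walkers:
 * 0xffffULL << 48 and 0x7fULL << 57 respectively.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t canonical(uint64_t la, unsigned va_bits)
{
    uint64_t top = 1ULL << (va_bits - 1);   /* top implemented VA bit */
    uint64_t low_mask = (top << 1) - 1;     /* bits va_bits-1 .. 0 */

    /* Replicate the top implemented bit into bits 63 .. va_bits. */
    return (la & top) ? (la | ~low_mask) : (la & low_mask);
}

int main(void)
{
    /* Upper-half addresses: top implemented bit (47 or 56) is set. */
    printf("4-level: 0x%016" PRIx64 "\n", canonical(1ULL << 47, 48));
    printf("5-level: 0x%016" PRIx64 "\n", canonical(1ULL << 56, 57));
    return 0;
}
```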