aboutsummaryrefslogtreecommitdiff
path: root/target/i386/hvf/x86.c
diff options
context:
space:
mode:
authorPhilippe Mathieu-Daudé <philmd@linaro.org>2023-06-21 13:15:27 +0200
committerPhilippe Mathieu-Daudé <philmd@linaro.org>2023-06-28 14:14:22 +0200
commit3b295bcb3289afec09508786032f4ba5d657a934 (patch)
tree37f11b0092601dd6303ab87a3b5c0fb2da713914 /target/i386/hvf/x86.c
parenta7159244285058c049ad53a42b3dc7b24809faaa (diff)
accel: Rename HVF 'struct hvf_vcpu_state' -> AccelCPUState
We want all accelerators to share the same opaque pointer in CPUState. Rename the 'hvf_vcpu_state' structure as 'AccelCPUState'. Use the generic 'accel' field of CPUState instead of 'hvf'. Replace g_malloc0() by g_new0() for readability. Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Tested-by: Peter Maydell <peter.maydell@linaro.org> Message-Id: <20230624174121.11508-17-philmd@linaro.org>
Diffstat (limited to 'target/i386/hvf/x86.c')
-rw-r--r--target/i386/hvf/x86.c28
1 file changed, 14 insertions, 14 deletions
diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c
index d086584f26..8ceea6398e 100644
--- a/target/i386/hvf/x86.c
+++ b/target/i386/hvf/x86.c
@@ -61,11 +61,11 @@ bool x86_read_segment_descriptor(struct CPUState *cpu,
}
if (GDT_SEL == sel.ti) {
- base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
- limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
+ base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE);
+ limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT);
} else {
- base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
- limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
+ base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE);
+ limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT);
}
if (sel.index * 8 >= limit) {
@@ -84,11 +84,11 @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
uint32_t limit;
if (GDT_SEL == sel.ti) {
- base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
- limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
+ base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE);
+ limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT);
} else {
- base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
- limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
+ base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE);
+ limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT);
}
if (sel.index * 8 >= limit) {
@@ -102,8 +102,8 @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
int gate)
{
- target_ulong base = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE);
- uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
+ target_ulong base = rvmcs(cpu->accel->fd, VMCS_GUEST_IDTR_BASE);
+ uint32_t limit = rvmcs(cpu->accel->fd, VMCS_GUEST_IDTR_LIMIT);
memset(idt_desc, 0, sizeof(*idt_desc));
if (gate * 8 >= limit) {
@@ -117,7 +117,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
bool x86_is_protected(struct CPUState *cpu)
{
- uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
+ uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0);
return cr0 & CR0_PE_MASK;
}
@@ -135,7 +135,7 @@ bool x86_is_v8086(struct CPUState *cpu)
bool x86_is_long_mode(struct CPUState *cpu)
{
- return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
+ return rvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
}
bool x86_is_long64_mode(struct CPUState *cpu)
@@ -148,13 +148,13 @@ bool x86_is_long64_mode(struct CPUState *cpu)
bool x86_is_paging_mode(struct CPUState *cpu)
{
- uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
+ uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0);
return cr0 & CR0_PG_MASK;
}
bool x86_is_pae_enabled(struct CPUState *cpu)
{
- uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
+ uint64_t cr4 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4);
return cr4 & CR4_PAE_MASK;
}