author     Peter Maydell <peter.maydell@linaro.org>   2022-03-15 18:58:41 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2022-03-15 18:58:41 +0000
commit     22a3a45ade8d331f3c318afeb0374c94129e55b4 (patch)
tree       046a894e945c680c58429b345969603c569373b8 /target
parent     e2fb7d8aa218256793df99571d16f92074258447 (diff)
parent     c82b7ef16f3efa59e28f821f25a9c084ef84ea9d (diff)
Merge tag 'darwin-20220315' of https://github.com/philmd/qemu into staging
Darwin-based host patches

- Remove various build warnings
- Fix building with modules on macOS
- Fix mouse/keyboard GUI interactions

# gpg: Signature made Tue 15 Mar 2022 12:52:19 GMT
# gpg:                using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE
# gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: FAAB E75E 1291 7221 DCFD 6BB2 E3E3 2C2C DEAD C0DE

* tag 'darwin-20220315' of https://github.com/philmd/qemu: (21 commits)
  MAINTAINERS: Volunteer to maintain Darwin-based hosts support
  ui/cocoa: add option to swap Option and Command
  ui/cocoa: capture all keys and combos when mouse is grabbed
  ui/cocoa: release mouse when user switches away from QEMU window
  ui/cocoa: add option to disable left-command forwarding to guest
  ui/cocoa: Constify qkeycode translation arrays
  configure: Pass filtered QEMU_OBJCFLAGS to meson
  meson: Log QEMU_CXXFLAGS content in summary
  meson: Resolve the entitlement.sh script once for good
  osdep: Avoid using Clang-specific __builtin_available()
  audio: Rename coreaudio extension to use Objective-C compiler
  coreaudio: Always return 0 in handle_voice_change
  audio: Log context for audio bug
  audio/dbus: Fix building with modules on macOS
  audio/coreaudio: Remove a deprecation warning on macOS 12
  block/file-posix: Remove a deprecation warning on macOS 12
  hvf: Remove deprecated hv_vcpu_flush() calls
  hvf: Make hvf_get_segments() / hvf_put_segments() local
  hvf: Use standard CR0 and CR4 register definitions
  tests/fp/berkeley-testfloat-3: Ignore ignored #pragma directives
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/i386/hvf/vmx.h        19
-rw-r--r--  target/i386/hvf/x86.c         6
-rw-r--r--  target/i386/hvf/x86.h        34
-rw-r--r--  target/i386/hvf/x86_mmu.c     2
-rw-r--r--  target/i386/hvf/x86_task.c    4
-rw-r--r--  target/i386/hvf/x86hvf.c      6
-rw-r--r--  target/i386/hvf/x86hvf.h      2
7 files changed, 17 insertions(+), 56 deletions(-)
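
Note on the hvf hunks that follow ("hvf: Use standard CR0 and CR4 register definitions"): they drop the hvf-local x86_reg_cr0 / x86_reg_cr4 enums and switch to the *_MASK macros that the rest of the i386 target already uses. As a sketch only (these definitions are not part of this diff; the bit positions are copied from the enum removed in target/i386/hvf/x86.h, and the real macros live in QEMU's shared i386 headers), the new names cover the same bits:

/* Sketch: equivalent bit positions for the masks used in the hunks below.
 * Values mirror the enum removed from target/i386/hvf/x86.h. */
#define CR0_PE_MASK   (1U << 0)   /* protected mode enable */
#define CR0_TS_MASK   (1U << 3)   /* task switched */
#define CR0_ET_MASK   (1U << 4)   /* extension type */
#define CR0_NE_MASK   (1U << 5)   /* numeric error */
#define CR0_WP_MASK   (1U << 16)  /* write protect */
#define CR0_NW_MASK   (1U << 29)  /* not write-through */
#define CR0_CD_MASK   (1U << 30)  /* cache disable */
#define CR0_PG_MASK   (1U << 31)  /* paging */

#define CR4_PAE_MASK  (1U << 5)   /* physical address extension */
#define CR4_VMXE_MASK (1U << 13)  /* VMX enable */

So an expression such as (cr0 & CR0_PG_MASK) in the vmx.h hunk tests exactly the bit that the old (cr0 & CR0_PG) did; only the spelling of the constant changes.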
diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h
index 6df87116f6..573ddc33c0 100644
--- a/target/i386/hvf/vmx.h
+++ b/target/i386/hvf/vmx.h
@@ -124,10 +124,11 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
uint64_t changed_cr0 = old_cr0 ^ cr0;
- uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
+ uint64_t mask = CR0_PG_MASK | CR0_CD_MASK | CR0_NW_MASK |
+ CR0_NE_MASK | CR0_ET_MASK;
uint64_t entry_ctls;
- if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
+ if ((cr0 & CR0_PG_MASK) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE_MASK) &&
!(efer & MSR_EFER_LME)) {
address_space_read(&address_space_memory,
rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
@@ -142,8 +143,8 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);
if (efer & MSR_EFER_LME) {
- if (changed_cr0 & CR0_PG) {
- if (cr0 & CR0_PG) {
+ if (changed_cr0 & CR0_PG_MASK) {
+ if (cr0 & CR0_PG_MASK) {
enter_long_mode(vcpu, cr0, efer);
} else {
exit_long_mode(vcpu, cr0, efer);
@@ -155,23 +156,21 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
}
/* Filter new CR0 after we are finished examining it above. */
- cr0 = (cr0 & ~(mask & ~CR0_PG));
- wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
+ cr0 = (cr0 & ~(mask & ~CR0_PG_MASK));
+ wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE_MASK | CR0_ET_MASK);
hv_vcpu_invalidate_tlb(vcpu);
- hv_vcpu_flush(vcpu);
}
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
- uint64_t guest_cr4 = cr4 | CR4_VMXE;
+ uint64_t guest_cr4 = cr4 | CR4_VMXE_MASK;
wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
- wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE);
+ wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE_MASK);
hv_vcpu_invalidate_tlb(vcpu);
- hv_vcpu_flush(vcpu);
}
static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c
index 2898bb70a8..91a3fe002c 100644
--- a/target/i386/hvf/x86.c
+++ b/target/i386/hvf/x86.c
@@ -119,7 +119,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
bool x86_is_protected(struct CPUState *cpu)
{
uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
- return cr0 & CR0_PE;
+ return cr0 & CR0_PE_MASK;
}
bool x86_is_real(struct CPUState *cpu)
@@ -150,13 +150,13 @@ bool x86_is_long64_mode(struct CPUState *cpu)
bool x86_is_paging_mode(struct CPUState *cpu)
{
uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
- return cr0 & CR0_PG;
+ return cr0 & CR0_PG_MASK;
}
bool x86_is_pae_enabled(struct CPUState *cpu)
{
uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
- return cr4 & CR4_PAE;
+ return cr4 & CR4_PAE_MASK;
}
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, X86Seg seg)
diff --git a/target/i386/hvf/x86.h b/target/i386/hvf/x86.h
index 782664c2ea..947b98da41 100644
--- a/target/i386/hvf/x86.h
+++ b/target/i386/hvf/x86.h
@@ -42,40 +42,6 @@ typedef struct x86_register {
};
} __attribute__ ((__packed__)) x86_register;
-typedef enum x86_reg_cr0 {
- CR0_PE = (1L << 0),
- CR0_MP = (1L << 1),
- CR0_EM = (1L << 2),
- CR0_TS = (1L << 3),
- CR0_ET = (1L << 4),
- CR0_NE = (1L << 5),
- CR0_WP = (1L << 16),
- CR0_AM = (1L << 18),
- CR0_NW = (1L << 29),
- CR0_CD = (1L << 30),
- CR0_PG = (1L << 31),
-} x86_reg_cr0;
-
-typedef enum x86_reg_cr4 {
- CR4_VME = (1L << 0),
- CR4_PVI = (1L << 1),
- CR4_TSD = (1L << 2),
- CR4_DE = (1L << 3),
- CR4_PSE = (1L << 4),
- CR4_PAE = (1L << 5),
- CR4_MSE = (1L << 6),
- CR4_PGE = (1L << 7),
- CR4_PCE = (1L << 8),
- CR4_OSFXSR = (1L << 9),
- CR4_OSXMMEXCPT = (1L << 10),
- CR4_VMXE = (1L << 13),
- CR4_SMXE = (1L << 14),
- CR4_FSGSBASE = (1L << 16),
- CR4_PCIDE = (1L << 17),
- CR4_OSXSAVE = (1L << 18),
- CR4_SMEP = (1L << 20),
-} x86_reg_cr4;
-
/* 16 bit Task State Segment */
typedef struct x86_tss_segment16 {
uint16_t link;
diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c
index e9ed0f5aa1..df0b91cd42 100644
--- a/target/i386/hvf/x86_mmu.c
+++ b/target/i386/hvf/x86_mmu.c
@@ -129,7 +129,7 @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
/* check protection */
- if (cr0 & CR0_WP) {
+ if (cr0 & CR0_WP_MASK) {
if (pt->write_access && !pte_write_access(pte)) {
return false;
}
diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c
index 422156128b..d24daf6a41 100644
--- a/target/i386/hvf/x86_task.c
+++ b/target/i386/hvf/x86_task.c
@@ -174,12 +174,12 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
//ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
VM_PANIC("task_switch_16");
- macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS);
+ macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) |
+ CR0_TS_MASK);
x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);
store_regs(cpu);
hv_vcpu_invalidate_tlb(cpu->hvf->fd);
- hv_vcpu_flush(cpu->hvf->fd);
}
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 05ec1bddc4..bec9fc5814 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -83,7 +83,7 @@ void hvf_put_xsave(CPUState *cpu_state)
}
}
-void hvf_put_segments(CPUState *cpu_state)
+static void hvf_put_segments(CPUState *cpu_state)
{
CPUX86State *env = &X86_CPU(cpu_state)->env;
struct vmx_segment seg;
@@ -125,8 +125,6 @@ void hvf_put_segments(CPUState *cpu_state)
hvf_set_segment(cpu_state, &seg, &env->ldt, false);
vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);
-
- hv_vcpu_flush(cpu_state->hvf->fd);
}
void hvf_put_msrs(CPUState *cpu_state)
@@ -166,7 +164,7 @@ void hvf_get_xsave(CPUState *cpu_state)
x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave, xsave_len);
}
-void hvf_get_segments(CPUState *cpu_state)
+static void hvf_get_segments(CPUState *cpu_state)
{
CPUX86State *env = &X86_CPU(cpu_state)->env;
diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h
index 99ed8d608d..db6003d6bd 100644
--- a/target/i386/hvf/x86hvf.h
+++ b/target/i386/hvf/x86hvf.h
@@ -26,11 +26,9 @@ void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
SegmentCache *qseg, bool is_tr);
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg);
void hvf_put_xsave(CPUState *cpu_state);
-void hvf_put_segments(CPUState *cpu_state);
void hvf_put_msrs(CPUState *cpu_state);
void hvf_get_xsave(CPUState *cpu_state);
void hvf_get_msrs(CPUState *cpu_state);
void vmx_clear_int_window_exiting(CPUState *cpu);
-void hvf_get_segments(CPUState *cpu_state);
void vmx_update_tpr(CPUState *cpu);
#endif