Diffstat (limited to 'include')
-rw-r--r--  include/elf.h                    129
-rw-r--r--  include/exec/cpu-defs.h            2
-rw-r--r--  include/exec/exec-all.h            5
-rw-r--r--  include/hw/i386/pc.h              28
-rw-r--r--  include/qemu/typedefs.h            2
-rw-r--r--  include/qom/cpu.h                 23
-rw-r--r--  include/sysemu/memory_mapping.h   16
7 files changed, 191 insertions, 14 deletions
diff --git a/include/elf.h b/include/elf.h
index a21ea535bd..cf0d3e2bd6 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -129,6 +129,8 @@ typedef int64_t Elf64_Sxword;
#define EM_XTENSA 94 /* Tensilica Xtensa */
+#define EM_AARCH64 183
+
/* This is the info that is needed to parse the dynamic section of the file */
#define DT_NULL 0
#define DT_NEEDED 1
@@ -616,6 +618,133 @@ typedef struct {
/* Keep this the last entry. */
#define R_ARM_NUM 256
+/* ARM AArch64 relocation types */
+#define R_AARCH64_NONE 256 /* also accepts R_ARM_NONE (0) */
+/* static data relocations */
+#define R_AARCH64_ABS64 257
+#define R_AARCH64_ABS32 258
+#define R_AARCH64_ABS16 259
+#define R_AARCH64_PREL64 260
+#define R_AARCH64_PREL32 261
+#define R_AARCH64_PREL16 262
+/* static aarch64 group relocations */
+/* group relocs to create unsigned data value or address inline */
+#define R_AARCH64_MOVW_UABS_G0 263
+#define R_AARCH64_MOVW_UABS_G0_NC 264
+#define R_AARCH64_MOVW_UABS_G1 265
+#define R_AARCH64_MOVW_UABS_G1_NC 266
+#define R_AARCH64_MOVW_UABS_G2 267
+#define R_AARCH64_MOVW_UABS_G2_NC 268
+#define R_AARCH64_MOVW_UABS_G3 269
+/* group relocs to create signed data or offset value inline */
+#define R_AARCH64_MOVW_SABS_G0 270
+#define R_AARCH64_MOVW_SABS_G1 271
+#define R_AARCH64_MOVW_SABS_G2 272
+/* relocs to generate 19, 21, and 33 bit PC-relative addresses */
+#define R_AARCH64_LD_PREL_LO19 273
+#define R_AARCH64_ADR_PREL_LO21 274
+#define R_AARCH64_ADR_PREL_PG_HI21 275
+#define R_AARCH64_ADR_PREL_PG_HI21_NC 276
+#define R_AARCH64_ADD_ABS_LO12_NC 277
+#define R_AARCH64_LDST8_ABS_LO12_NC 278
+#define R_AARCH64_LDST16_ABS_LO12_NC 284
+#define R_AARCH64_LDST32_ABS_LO12_NC 285
+#define R_AARCH64_LDST64_ABS_LO12_NC 286
+#define R_AARCH64_LDST128_ABS_LO12_NC 299
+/* relocs for control-flow - all offsets as multiple of 4 */
+#define R_AARCH64_TSTBR14 279
+#define R_AARCH64_CONDBR19 280
+#define R_AARCH64_JUMP26 282
+#define R_AARCH64_CALL26 283
+/* group relocs to create pc-relative offset inline */
+#define R_AARCH64_MOVW_PREL_G0 287
+#define R_AARCH64_MOVW_PREL_G0_NC 288
+#define R_AARCH64_MOVW_PREL_G1 289
+#define R_AARCH64_MOVW_PREL_G1_NC 290
+#define R_AARCH64_MOVW_PREL_G2 291
+#define R_AARCH64_MOVW_PREL_G2_NC 292
+#define R_AARCH64_MOVW_PREL_G3 293
+/* group relocs to create a GOT-relative offset inline */
+#define R_AARCH64_MOVW_GOTOFF_G0 300
+#define R_AARCH64_MOVW_GOTOFF_G0_NC 301
+#define R_AARCH64_MOVW_GOTOFF_G1 302
+#define R_AARCH64_MOVW_GOTOFF_G1_NC 303
+#define R_AARCH64_MOVW_GOTOFF_G2 304
+#define R_AARCH64_MOVW_GOTOFF_G2_NC 305
+#define R_AARCH64_MOVW_GOTOFF_G3 306
+/* GOT-relative data relocs */
+#define R_AARCH64_GOTREL64 307
+#define R_AARCH64_GOTREL32 308
+/* GOT-relative instr relocs */
+#define R_AARCH64_GOT_LD_PREL19 309
+#define R_AARCH64_LD64_GOTOFF_LO15 310
+#define R_AARCH64_ADR_GOT_PAGE 311
+#define R_AARCH64_LD64_GOT_LO12_NC 312
+#define R_AARCH64_LD64_GOTPAGE_LO15 313
+/* General Dynamic TLS relocations */
+#define R_AARCH64_TLSGD_ADR_PREL21 512
+#define R_AARCH64_TLSGD_ADR_PAGE21 513
+#define R_AARCH64_TLSGD_ADD_LO12_NC 514
+#define R_AARCH64_TLSGD_MOVW_G1 515
+#define R_AARCH64_TLSGD_MOVW_G0_NC 516
+/* Local Dynamic TLS relocations */
+#define R_AARCH64_TLSLD_ADR_PREL21 517
+#define R_AARCH64_TLSLD_ADR_PAGE21 518
+#define R_AARCH64_TLSLD_ADD_LO12_NC 519
+#define R_AARCH64_TLSLD_MOVW_G1 520
+#define R_AARCH64_TLSLD_MOVW_G0_NC 521
+#define R_AARCH64_TLSLD_LD_PREL19 522
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G2 523
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1 524
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC 525
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0 526
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC 527
+#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12 529
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC 530
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12 531
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC 532
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12 533
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC 534
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12 535
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC 536
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12 537
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC 538
+/* initial exec TLS relocations */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 539
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC 540
+#define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 541
+#define R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC 542
+#define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 543
+/* local exec TLS relocations */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G2 544
+#define R_AARCH64_TLSLE_MOVW_TPREL_G1 545
+#define R_AARCH64_TLSLE_MOVW_TPREL_G1_NC 546
+#define R_AARCH64_TLSLE_MOVW_TPREL_G0 547
+#define R_AARCH64_TLSLE_MOVW_TPREL_G0_NC 548
+#define R_AARCH64_TLSLE_ADD_TPREL_HI12 549
+#define R_AARCH64_TLSLE_ADD_TPREL_LO12 550
+#define R_AARCH64_TLSLE_ADD_TPREL_LO12_NC 551
+#define R_AARCH64_TLSLE_LDST8_TPREL_LO12 552
+#define R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC 553
+#define R_AARCH64_TLSLE_LDST16_TPREL_LO12 554
+#define R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC 555
+#define R_AARCH64_TLSLE_LDST32_TPREL_LO12 556
+#define R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC 557
+#define R_AARCH64_TLSLE_LDST64_TPREL_LO12 558
+#define R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC 559
+/* Dynamic Relocations */
+#define R_AARCH64_COPY 1024
+#define R_AARCH64_GLOB_DAT 1025
+#define R_AARCH64_JUMP_SLOT 1026
+#define R_AARCH64_RELATIVE 1027
+#define R_AARCH64_TLS_DTPREL64 1028
+#define R_AARCH64_TLS_DTPMOD64 1029
+#define R_AARCH64_TLS_TPREL64 1030
+#define R_AARCH64_TLS_DTPREL32 1031
+#define R_AARCH64_TLS_DTPMOD32 1032
+#define R_AARCH64_TLS_TPREL32 1033
+
/* s390 relocations defined by the ABIs */
#define R_390_NONE 0 /* No reloc. */
#define R_390_8 1 /* Direct 8 bit. */
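The values above follow the AArch64 ELF ABI numbering. As a hedged illustration of how a consumer dispatches on them, the helper below is invented for this example (apply_aarch64_reloc and its arguments are not part of this patch); it applies the absolute R_AARCH64_ABS64 and the branch-immediate R_AARCH64_CALL26 using the standard A64 encodings:

/* Hypothetical sketch: assumes the R_AARCH64_* constants defined above
 * (i.e. this elf.h is included) and the documented A64 instruction layout. */
#include <stdint.h>
#include <string.h>

static void apply_aarch64_reloc(uint32_t type, uint8_t *place,
                                uint64_t sym, int64_t addend)
{
    switch (type) {
    case R_AARCH64_ABS64: {
        /* S + A, stored as a raw 64-bit value at the relocated location */
        uint64_t val = sym + addend;
        memcpy(place, &val, sizeof(val));
        break;
    }
    case R_AARCH64_CALL26: {
        /* S + A - P, as a signed 26-bit word offset patched into a BL insn */
        uint32_t insn;
        int64_t off = ((int64_t)(sym + addend) - (int64_t)(uintptr_t)place) >> 2;
        memcpy(&insn, place, sizeof(insn));
        insn = (insn & 0xfc000000u) | ((uint32_t)off & 0x03ffffffu);
        memcpy(place, &insn, sizeof(insn));
        break;
    }
    default:
        break; /* the remaining types are omitted from this sketch */
    }
}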
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index d8c64e9278..2e5a9bab3c 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -99,7 +99,7 @@ typedef struct CPUTLBEntry {
sizeof(uintptr_t))];
} CPUTLBEntry;
-extern int CPUTLBEntry_wrong_size[sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];
+QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
#define CPU_COMMON_TLB \
/* The meaning of the MMU modes is defined in the target code. */ \
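QEMU_BUILD_BUG_ON comes from the common QEMU headers and replaces the hand-rolled negative-array-size check that the removed line spelled out. Purely as a sketch of that underlying technique (QEMU's real macro may differ in detail), a compile-time assertion can be built like this:

/* Sketch of the negative-array-size trick, not QEMU's actual definition. */
#define SKETCH_CAT_(a, b)  a##b
#define SKETCH_CAT(a, b)   SKETCH_CAT_(a, b)
#define SKETCH_BUILD_BUG_ON(cond) \
    typedef char SKETCH_CAT(sketch_build_bug_on_, __LINE__)[(cond) ? -1 : 1]

/* Mirrors the use above: refuses to compile if the TLB entry size is wrong. */
SKETCH_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));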
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 17fde25c74..b2162a4ec4 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -128,7 +128,7 @@ static inline void tlb_flush(CPUArchState *env, int flush_global)
#if defined(__arm__) || defined(_ARCH_PPC) \
|| defined(__x86_64__) || defined(__i386__) \
- || defined(__sparc__) \
+ || defined(__sparc__) || defined(__aarch64__) \
|| defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif
@@ -230,6 +230,9 @@ static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
*(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
/* no need to flush icache explicitly */
}
+#elif defined(__aarch64__)
+void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
+#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
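Unlike i386, where the hunk above patches the jump with a single store, AArch64 routes tb_set_jmp_target1 to an out-of-line helper provided by the TCG backend (tcg/aarch64/tcg-target.c, not part of this diff). A hedged sketch of what such a helper does, namely rewriting the instruction at jmp_addr as an unconditional B and flushing the icache:

/* Illustrative only; the real aarch64_tb_set_jmp_target lives in the TCG
 * aarch64 backend and may differ in detail. */
#include <stdint.h>

void aarch64_tb_set_jmp_target_sketch(uintptr_t jmp_addr, uintptr_t addr)
{
    /* A64 "B <label>": opcode 0b000101 in bits [31:26], word offset in [25:0] */
    int64_t offset = ((int64_t)addr - (int64_t)jmp_addr) >> 2;
    uint32_t insn = 0x14000000u | ((uint32_t)offset & 0x03ffffffu);

    *(uint32_t *)jmp_addr = insn;
    /* on this host the new instruction must be made visible explicitly */
    __builtin___clear_cache((char *)jmp_addr, (char *)jmp_addr + 4);
}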
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 61540587a4..fab9be51cc 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -185,7 +185,35 @@ int pvpanic_init(ISABus *bus);
int e820_add_entry(uint64_t, uint64_t, uint32_t);
+#define PC_COMPAT_1_5 \
+ {\
+ .driver = "Conroe-" TYPE_X86_CPU,\
+ .property = "model",\
+ .value = stringify(2),\
+ },{\
+ .driver = "Conroe-" TYPE_X86_CPU,\
+ .property = "level",\
+ .value = stringify(2),\
+ },{\
+ .driver = "Penryn-" TYPE_X86_CPU,\
+ .property = "model",\
+ .value = stringify(2),\
+ },{\
+ .driver = "Penryn-" TYPE_X86_CPU,\
+ .property = "level",\
+ .value = stringify(2),\
+ },{\
+ .driver = "Nehalem-" TYPE_X86_CPU,\
+ .property = "model",\
+ .value = stringify(2),\
+ },{\
+ .driver = "Nehalem-" TYPE_X86_CPU,\
+ .property = "level",\
+ .value = stringify(2),\
+ }
+
#define PC_COMPAT_1_4 \
+ PC_COMPAT_1_5, \
{\
.driver = "scsi-hd",\
.property = "discard_granularity",\
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index afe4ec76e1..698fc03d78 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -22,6 +22,8 @@ typedef struct AddressSpace AddressSpace;
typedef struct MemoryRegion MemoryRegion;
typedef struct MemoryRegionSection MemoryRegionSection;
+typedef struct MemoryMappingList MemoryMappingList;
+
typedef struct NICInfo NICInfo;
typedef struct HCIInfo HCIInfo;
typedef struct AudioState AudioState;
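Naming the struct here lets headers such as qom/cpu.h below take MemoryMappingList pointers without including sysemu/memory_mapping.h; only code that actually walks the list needs the full definition. The pattern in miniature:

/* qemu/typedefs.h: declare the name only */
typedef struct MemoryMappingList MemoryMappingList;

/* any header that merely passes pointers around can now say: */
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

/* sysemu/memory_mapping.h supplies the real struct MemoryMappingList body */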
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 7cd9442503..a5bb515978 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -23,6 +23,7 @@
#include <signal.h>
#include "hw/qdev-core.h"
#include "qemu/thread.h"
+#include "qemu/typedefs.h"
typedef int (*WriteCoreDumpFunction)(void *buf, size_t size, void *opaque);
@@ -48,6 +49,8 @@ typedef struct CPUState CPUState;
* @reset: Callback to reset the #CPUState to its initial state.
* @do_interrupt: Callback for interrupt handling.
* @get_arch_id: Callback for getting architecture-dependent CPU ID.
+ * @get_paging_enabled: Callback for inquiring whether paging is enabled.
+ * @get_memory_mapping: Callback for obtaining the memory mappings.
* @vmsd: State description for migration.
*
* Represents a CPU family or model.
@@ -62,6 +65,9 @@ typedef struct CPUClass {
void (*reset)(CPUState *cpu);
void (*do_interrupt)(CPUState *cpu);
int64_t (*get_arch_id)(CPUState *cpu);
+ bool (*get_paging_enabled)(const CPUState *cpu);
+ void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
+ Error **errp);
const struct VMStateDescription *vmsd;
int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
@@ -138,6 +144,23 @@ struct CPUState {
};
/**
+ * cpu_paging_enabled:
+ * @cpu: The CPU whose state is to be inspected.
+ *
+ * Returns: %true if paging is enabled, %false otherwise.
+ */
+bool cpu_paging_enabled(const CPUState *cpu);
+
+/**
+ * cpu_get_memory_mapping:
+ * @cpu: The CPU whose memory mappings are to be obtained.
+ * @list: Where to write the memory mappings to.
+ * @errp: Pointer for reporting an #Error.
+ */
+void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+ Error **errp);
+
+/**
* cpu_write_elf64_note:
* @f: pointer to a function that writes memory to a file
* @cpu: The CPU whose memory is to be dumped
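The two wrappers declared above dispatch through the new CPUClass hooks; their bodies belong in qom/cpu.c rather than this header. A plausible sketch of those definitions (not shown in this diff):

/* Sketch of the qom/cpu.c counterparts for the declarations above. */
bool cpu_paging_enabled(const CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_paging_enabled(cpu);
}

void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->get_memory_mapping(cpu, list, errp);
}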
diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h
index 1256125963..6dfb68ddcd 100644
--- a/include/sysemu/memory_mapping.h
+++ b/include/sysemu/memory_mapping.h
@@ -15,6 +15,7 @@
#define MEMORY_MAPPING_H
#include "qemu/queue.h"
+#include "qemu/typedefs.h"
/* The physical and virtual address in the memory mapping are contiguous. */
typedef struct MemoryMapping {
@@ -24,14 +25,11 @@ typedef struct MemoryMapping {
QTAILQ_ENTRY(MemoryMapping) next;
} MemoryMapping;
-typedef struct MemoryMappingList {
+struct MemoryMappingList {
unsigned int num;
MemoryMapping *last_mapping;
QTAILQ_HEAD(, MemoryMapping) head;
-} MemoryMappingList;
-
-int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env);
-bool cpu_paging_enabled(CPUArchState *env);
+};
/*
* add or merge the memory region [phys_addr, phys_addr + length) into the
@@ -47,13 +45,7 @@ void memory_mapping_list_free(MemoryMappingList *list);
void memory_mapping_list_init(MemoryMappingList *list);
-/*
- * Return value:
- * 0: success
- * -1: failed
- * -2: unsupported
- */
-int qemu_get_guest_memory_mapping(MemoryMappingList *list);
+void qemu_get_guest_memory_mapping(MemoryMappingList *list, Error **errp);
/* get guest's memory mapping without doing paging (virtual address is 0). */
void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list);
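With the int return gone, callers of qemu_get_guest_memory_mapping follow the Error convention instead of checking -1/-2. A hedged sketch of the caller side, assuming an errp parameter in the enclosing function (the dump code is the main user; its exact handling may differ):

/* Sketch of a caller adapted to the new Error-based signature. */
MemoryMappingList list;
Error *local_err = NULL;

memory_mapping_list_init(&list);
qemu_get_guest_memory_mapping(&list, &local_err);
if (local_err) {
    error_propagate(errp, local_err);
    memory_mapping_list_free(&list);
    return -1;
}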