/*
* qemu user cpu loop
*
* Copyright (c) 2003-2008 Fabrice Bellard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu.h"
#include "user-internals.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
#include "qemu/guest-random.h"
#include "semihosting/common-semi.h"
#include "target/arm/syndrome.h"
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })

/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr, ec, fsc, si_code, si_signo;
    abi_long ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_SWI:
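            /*
             * Linux AArch64 syscall convention: number in x8,
             * arguments in x0-x5, return value placed back in x0.
             */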
            ret = do_syscall(env,
                             env->xregs[8],
                             env->xregs[0],
                             env->xregs[1],
                             env->xregs[2],
                             env->xregs[3],
                             env->xregs[4],
                             env->xregs[5],
                             0, 0);
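            /*
             * -TARGET_ERESTARTSYS: step the pc back over the SVC so the
             * syscall is restarted once the pending signal is handled.
             * -TARGET_QEMU_ESIGRETURN: sigreturn has already restored the
             * register state, so x0 must not be overwritten here.
             */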
            if (ret == -TARGET_ERESTARTSYS) {
                env->pc -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->xregs[0] = ret;
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_UDEF:
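            /* Undefined instruction: deliver SIGILL at the faulting pc. */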
            force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->pc);
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            /* We should only arrive here with EC in {DATAABORT, INSNABORT}. */
            ec = syn_get_ec(env->exception.syndrome);
            assert(ec == EC_DATAABORT || ec == EC_INSNABORT);

            /* Both EC have the same format for FSC, or close enough. */
            fsc = extract32(env->exception.syndrome, 0, 6);
            switch (fsc) {
            case 0x04 ... 0x07: /* Translation fault, level {0-3} */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_MAPERR;
                break;
            case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
            case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_ACCERR;
                break;
            case 0x11: /* Synchronous Tag Check Fault */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_MTESERR;
                break;
            case 0x21: /* Alignment fault */
                si_signo = TARGET_SIGBUS;
                si_code = TARGET_BUS_ADRALN;
                break;
            default:
                g_assert_not_reached();
            }

            force_sig_fault(si_signo, si_code, env->exception.vaddress);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
            force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc);
            break;
        case EXCP_SEMIHOST:
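            /*
             * Semihosting call: return value goes in x0, then step past
             * the 4-byte instruction.
             */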
            env->xregs[0] = do_common_semihosting(cs);
            env->pc += 4;
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
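            /*
             * Replay an instruction that needs atomic/exclusive semantics
             * in the serialized, exclusive execution context.
             */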
            cpu_exec_step_atomic(cs);
            break;
        default:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
                      trapnr);
            abort();
        }

        /* Check for MTE asynchronous faults */
        if (unlikely(env->cp15.tfsr_el[0])) {
            env->cp15.tfsr_el[0] = 0;
            force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MTEAERR, 0);
        }

        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         */
        env->exclusive_addr = -1;
    }
}

void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    TaskState *ts = cs->opaque;
    struct image_info *info = ts->info;
    int i;

    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
        fprintf(stderr,
                "The selected ARM CPU does not support 64 bit mode\n");
        exit(EXIT_FAILURE);
    }

    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    }
    env->pc = regs->pc;
    env->xregs[31] = regs->sp;
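    /*
     * For a big-endian guest, set the data-endianness bits (SCTLR.E0E for
     * EL0, SCTLR.EE for the higher ELs) and rebuild the cached hflags.
     */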
#ifdef TARGET_WORDS_BIGENDIAN
    env->cp15.sctlr_el[1] |= SCTLR_E0E;
    for (i = 1; i < 4; ++i) {
        env->cp15.sctlr_el[i] |= SCTLR_EE;
    }
    arm_rebuild_hflags(env);
#endif
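
    /* Generate fresh random pointer-authentication keys for this process. */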
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        qemu_guest_getrandom_nofail(&env->keys, sizeof(env->keys));
    }

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call. */
    ts->heap_limit = 0;
}