/*
* Emulation of BSD signals
*
* Copyright (c) 2003 - 2008 Fabrice Bellard
* Copyright (c) 2013 Stacey Son
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu.h"
#include "signal-common.h"
#include "trace.h"
#include "hw/core/tcg-cpu-ops.h"
#include "host-signal.h"
/*
 * Stubbed out routines until we merge signal support from the bsd-user
* fork.
*/
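/* Per-signal target dispositions, indexed by target signal number - 1. */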
static struct target_sigaction sigact_table[TARGET_NSIG];
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc);
/*
 * The BSD ABIs use the same signal numbers across all the CPU architectures, so
* (unlike Linux) these functions are just the identity mapping. This might not
* be true for XyzBSD running on AbcBSD, which doesn't currently work.
*/
int host_to_target_signal(int sig)
{
return sig;
}
int target_to_host_signal(int sig)
{
return sig;
}
/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
ucontext_t *uc = (ucontext_t *)puc;
uintptr_t pcreg = host_signal_pc(uc);
if (pcreg > (uintptr_t)safe_syscall_start
&& pcreg < (uintptr_t)safe_syscall_end) {
host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
}
}
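/* True for signals whose siginfo carries a valid fault trap number. */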
static bool has_trapno(int tsig)
{
return tsig == TARGET_SIGILL ||
tsig == TARGET_SIGFPE ||
tsig == TARGET_SIGSEGV ||
tsig == TARGET_SIGBUS ||
tsig == TARGET_SIGTRAP;
}
/* Siginfo conversion. */
/*
* Populate tinfo w/o swapping based on guessing which fields are valid.
*/
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
const siginfo_t *info)
{
int sig = host_to_target_signal(info->si_signo);
int si_code = info->si_code;
    int si_type = QEMU_SI_NOINFO; /* default if nothing more specific matches below */
/*
 * Make sure that the variable portion of the target siginfo is zeroed
 * out so we don't leak anything into it.
*/
memset(&tinfo->_reason, 0, sizeof(tinfo->_reason));
/*
* This is awkward, because we have to use a combination of the si_code and
 * si_signo to figure out which of the union's members are valid. We
* therefore make our best guess.
*
 * Once we have made our guess, we record it in the top 8 bits of
* the si_code, so that tswap_siginfo() later can use it.
* tswap_siginfo() will strip these top bits out before writing
* si_code to the guest (sign-extending the lower bits).
*/
tinfo->si_signo = sig;
tinfo->si_errno = info->si_errno;
tinfo->si_code = info->si_code;
tinfo->si_pid = info->si_pid;
tinfo->si_uid = info->si_uid;
tinfo->si_status = info->si_status;
tinfo->si_addr = (abi_ulong)(unsigned long)info->si_addr;
/*
 * si_value is opaque to the kernel. On all FreeBSD platforms,
* sizeof(sival_ptr) >= sizeof(sival_int) so the following
* always will copy the larger element.
*/
tinfo->si_value.sival_ptr =
(abi_ulong)(unsigned long)info->si_value.sival_ptr;
switch (si_code) {
/*
* All the SI_xxx codes that are defined here are global to
* all the signals (they have values that none of the other,
* more specific signal info will set).
*/
case SI_USER:
case SI_LWP:
case SI_KERNEL:
case SI_QUEUE:
case SI_ASYNCIO:
/*
* Only the fixed parts are valid (though FreeBSD doesn't always
 * set all the fields to non-zero values).
*/
si_type = QEMU_SI_NOINFO;
break;
case SI_TIMER:
tinfo->_reason._timer._timerid = info->_reason._timer._timerid;
tinfo->_reason._timer._overrun = info->_reason._timer._overrun;
si_type = QEMU_SI_TIMER;
break;
case SI_MESGQ:
tinfo->_reason._mesgq._mqd = info->_reason._mesgq._mqd;
si_type = QEMU_SI_MESGQ;
break;
default:
/*
* We have to go based on the signal number now to figure out
* what's valid.
*/
if (has_trapno(sig)) {
tinfo->_reason._fault._trapno = info->_reason._fault._trapno;
si_type = QEMU_SI_FAULT;
}
#ifdef TARGET_SIGPOLL
/*
* FreeBSD never had SIGPOLL, but emulates it for Linux so there's
 * a chance it may pop up in the future.
*/
if (sig == TARGET_SIGPOLL) {
tinfo->_reason._poll._band = info->_reason._poll._band;
si_type = QEMU_SI_POLL;
}
#endif
/*
 * We're not sure this can actually be generated, and our support for
 * capsicum is somewhere between weak and non-existent, but if we get
* one, then we know what to save.
*/
if (sig == TARGET_SIGTRAP) {
tinfo->_reason._capsicum._syscall =
info->_reason._capsicum._syscall;
si_type = QEMU_SI_CAPSICUM;
}
break;
}
tinfo->si_code = deposit32(si_code, 24, 8, si_type);
}
/* Returns 1 if the given signal should dump core when not handled. */
static int core_dump_signal(int sig)
{
switch (sig) {
case TARGET_SIGABRT:
case TARGET_SIGFPE:
case TARGET_SIGILL:
case TARGET_SIGQUIT:
case TARGET_SIGSEGV:
case TARGET_SIGTRAP:
case TARGET_SIGBUS:
return 1;
default:
return 0;
}
}
/* Abort execution with signal. */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
CPUArchState *env = thread_cpu->env_ptr;
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
int core_dumped = 0;
int host_sig;
struct sigaction act;
host_sig = target_to_host_signal(target_sig);
gdb_signalled(env, target_sig);
/* Dump core if supported by target binary format */
if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
stop_all_tasks();
core_dumped =
((*ts->bprm->core_dump)(target_sig, env) == 0);
}
if (core_dumped) {
struct rlimit nodump;
/*
 * We already dumped the core of the target process, so we don't want
 * a core dump of qemu itself.
*/
getrlimit(RLIMIT_CORE, &nodump);
nodump.rlim_cur = 0;
setrlimit(RLIMIT_CORE, &nodump);
(void) fprintf(stderr, "qemu: uncaught target signal %d (%s) "
"- %s\n", target_sig, strsignal(host_sig), "core dumped");
}
/*
* The proper exit code for dying from an uncaught signal is
* -<signal>. The kernel doesn't allow exit() or _exit() to pass
* a negative value. To get the proper exit code we need to
* actually die from an uncaught signal. Here the default signal
 * handler is installed, we send ourselves a signal and we wait for
* it to arrive.
*/
memset(&act, 0, sizeof(act));
sigfillset(&act.sa_mask);
act.sa_handler = SIG_DFL;
sigaction(host_sig, &act, NULL);
kill(getpid(), host_sig);
/*
* Make sure the signal isn't masked (just reuse the mask inside
* of act).
*/
sigdelset(&act.sa_mask, host_sig);
sigsuspend(&act.sa_mask);
/* unreachable */
abort();
}
/*
 * Queue a signal so that it will be sent to the virtual CPU as soon as
* possible.
*/
void queue_signal(CPUArchState *env, int sig, int si_type,
target_siginfo_t *info)
{
qemu_log_mask(LOG_UNIMP, "No signal queueing, dropping signal %d\n", sig);
}
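/*
 * Return 1 if the given target signal is fatal by default, i.e. neither
 * ignored by default nor a job-control signal; these are the signals for
 * which signal_init() installs the host handler.
 */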
static int fatal_signal(int sig)
{
switch (sig) {
case TARGET_SIGCHLD:
case TARGET_SIGURG:
case TARGET_SIGWINCH:
case TARGET_SIGINFO:
/* Ignored by default. */
return 0;
case TARGET_SIGCONT:
case TARGET_SIGSTOP:
case TARGET_SIGTSTP:
case TARGET_SIGTTIN:
case TARGET_SIGTTOU:
/* Job control signals. */
return 0;
default:
return 1;
}
}
/*
* Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
* 'force' part is handled in process_pending_signals().
*/
void force_sig_fault(int sig, int code, abi_ulong addr)
{
CPUState *cpu = thread_cpu;
CPUArchState *env = cpu->env_ptr;
target_siginfo_t info = {};
info.si_signo = sig;
info.si_errno = 0;
info.si_code = code;
info.si_addr = addr;
queue_signal(env, sig, QEMU_SI_FAULT, &info);
}
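/*
 * Handler for all host signals trapped by signal_init(): convert the host
 * siginfo to target form and record the signal as pending for the guest,
 * with special handling for synchronous SIGSEGV/SIGBUS faults.
 */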
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
CPUArchState *env = thread_cpu->env_ptr;
CPUState *cpu = env_cpu(env);
TaskState *ts = cpu->opaque;
target_siginfo_t tinfo;
ucontext_t *uc = puc;
struct emulated_sigtable *k;
int guest_sig;
uintptr_t pc = 0;
bool sync_sig = false;
/*
* Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
* handling wrt signal blocking and unwinding.
*/
if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
MMUAccessType access_type;
uintptr_t host_addr;
abi_ptr guest_addr;
bool is_write;
host_addr = (uintptr_t)info->si_addr;
/*
* Convert forcefully to guest address space: addresses outside
* reserved_va are still valid to report via SEGV_MAPERR.
*/
guest_addr = h2g_nocheck(host_addr);
pc = host_signal_pc(uc);
is_write = host_signal_write(info, uc);
access_type = adjust_signal_pc(&pc, is_write);
if (host_sig == SIGSEGV) {
bool maperr = true;
if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
/* If this was a write to a TB protected page, restart. */
if (is_write &&
handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
pc, guest_addr)) {
return;
}
/*
* With reserved_va, the whole address space is PROT_NONE,
* which means that we may get ACCERR when we want MAPERR.
*/
if (page_get_flags(guest_addr) & PAGE_VALID) {
maperr = false;
} else {
info->si_code = SEGV_MAPERR;
}
}
sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
} else {
sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
if (info->si_code == BUS_ADRALN) {
cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
}
}
sync_sig = true;
}
/* Get the target signal number. */
guest_sig = host_to_target_signal(host_sig);
if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
return;
}
trace_user_host_signal(cpu, host_sig, guest_sig);
host_to_target_siginfo_noswap(&tinfo, info);
k = &ts->sigtab[guest_sig - 1];
k->info = tinfo;
k->pending = guest_sig;
ts->signal_pending = 1;
/*
* For synchronous signals, unwind the cpu state to the faulting
* insn and then exit back to the main loop so that the signal
* is delivered immediately.
*/
if (sync_sig) {
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit_restore(cpu, pc);
}
rewind_if_in_safe_syscall(puc);
/*
 * Block host signals until the target signal handler is entered. We
* can't block SIGSEGV or SIGBUS while we're executing guest
* code in case the guest code provokes one in the window between
* now and it getting out to the main loop. Signals will be
* unblocked again in process_pending_signals().
*/
sigfillset(&uc->uc_sigmask);
sigdelset(&uc->uc_sigmask, SIGSEGV);
sigdelset(&uc->uc_sigmask, SIGBUS);
/* Interrupt the virtual CPU as soon as possible. */
cpu_exit(thread_cpu);
}
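/*
 * Record the inherited disposition of each target signal and install
 * host_signal_handler for every signal that is fatal by default.
 */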
void signal_init(void)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
struct sigaction act;
struct sigaction oact;
int i;
int host_sig;
    /* Initialise the task's signal mask from the current host mask. */
sigprocmask(0, 0, &ts->signal_mask);
sigfillset(&act.sa_mask);
act.sa_sigaction = host_signal_handler;
act.sa_flags = SA_SIGINFO;
for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
if (i == TARGET_SIGPROF) {
continue;
}
#endif
host_sig = target_to_host_signal(i);
sigaction(host_sig, NULL, &oact);
if (oact.sa_sigaction == (void *)SIG_IGN) {
sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
} else if (oact.sa_sigaction == (void *)SIG_DFL) {
sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
}
/*
* If there's already a handler installed then something has
* gone horribly wrong, so don't even try to handle that case.
* Install some handlers for our own use. We need at least
 * SIGSEGV and SIGBUS, to detect exceptions. We cannot just
* trap all signals because it affects syscall interrupt
* behavior. But do trap all default-fatal signals.
*/
if (fatal_signal(i)) {
sigaction(host_sig, &act, NULL);
}
}
}
void process_pending_signals(CPUArchState *cpu_env)
{
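    /* Stub until signal delivery is merged from the bsd-user fork. */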
}
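/*
 * Deliver a synchronous SIGSEGV: let the target record any
 * architecture-specific fault state, queue the signal, and unwind back
 * to the main loop at the faulting instruction.
 */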
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
MMUAccessType access_type, bool maperr, uintptr_t ra)
{
const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
if (tcg_ops->record_sigsegv) {
tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
}
force_sig_fault(TARGET_SIGSEGV,
maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
addr);
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit_restore(cpu, ra);
}
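/* As above, but deliver a SIGBUS for a misaligned guest access. */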
void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
MMUAccessType access_type, uintptr_t ra)
{
const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
if (tcg_ops->record_sigbus) {
tcg_ops->record_sigbus(cpu, addr, access_type, ra);
}
force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit_restore(cpu, ra);
}