author     Richard Henderson <richard.henderson@linaro.org>  2022-10-21 10:47:54 +1000
committer  Richard Henderson <richard.henderson@linaro.org>  2023-02-04 06:19:42 -1000
commit     e9709e17ac88f16c60004c4160c9a131d36ed564 (patch)
tree       05ef76aa676a95fd3c8325090e5ba0035eead71c /tcg/tci.c
parent     896c76e6ba5d9a3444fb8528fdc407747ecc82f2 (diff)
tcg/tci: Add TCG_TARGET_CALL_{RET,ARG}_I128
Fill in the parameters for libffi for Int128.  Adjust the interpreter
to allow for 16-byte return values.  Adjust tcg_out_call to record
the return value length.

Call parameters are no longer all the same size, so we cannot reuse
the same call_slots array for every function.  Compute it each time
now, but only fill in slots required for the call we're about to make.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tcg/tci.c')
-rw-r--r--  tcg/tci.c  42
1 file changed, 21 insertions(+), 21 deletions(-)
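As an aside, the per-call slot computation this patch introduces can be exercised with plain libffi outside of QEMU: each argument advances the stack cursor by its own ffi_type size, so a 16-byte value occupies two 64-bit slots and the following argument's avalue pointer shifts accordingly. The sketch below is illustrative only and uses hypothetical names (sum_parts, two_u64, a local DIV_ROUND_UP); it is not QEMU code.

/*
 * Illustrative sketch: per-argument ffi_type sizes drive the avalue
 * slot computation when arguments are left-aligned in 64-bit slots.
 * Names here are assumptions for the example, not from QEMU.
 */
#include <ffi.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* Stand-in for a 16-byte value: a struct of two 64-bit halves. */
typedef struct { uint64_t lo, hi; } two_u64;

/* Hypothetical helper: a two-slot argument followed by a one-slot one. */
static uint64_t sum_parts(two_u64 v, uint64_t a)
{
    return v.lo + v.hi + a;
}

int main(void)
{
    /* Describe the struct argument to libffi. */
    ffi_type *elems[3] = { &ffi_type_uint64, &ffi_type_uint64, NULL };
    ffi_type type_two_u64 = { 0, 0, FFI_TYPE_STRUCT, elems };
    ffi_type *arg_types[2] = { &type_two_u64, &ffi_type_uint64 };

    ffi_cif cif;
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_uint64,
                     arg_types) != FFI_OK) {
        return 1;
    }

    /* Arguments left-aligned in 64-bit slots: { v.lo, v.hi, a }. */
    uint64_t stack[3] = { 1, 2, 3 };
    void *call_slots[2];

    /* Per-call slot computation: advance by each argument's own size. */
    for (unsigned i = 0, s = 0; i < cif.nargs; ++i) {
        call_slots[i] = &stack[s];
        s += DIV_ROUND_UP(cif.arg_types[i]->size, 8);
    }

    uint64_t ret;
    ffi_call(&cif, FFI_FN(sum_parts), &ret, call_slots);
    printf("%llu\n", (unsigned long long)ret);   /* prints 6 */
    return 0;
}

This is also why the fixed one-slot-per-argument call_slots array is removed by the patch: once an argument may be wider than 8 bytes, argument index and slot index no longer coincide, so the pointers must be recomputed per call from the cif's argument types.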
diff --git a/tcg/tci.c b/tcg/tci.c
index eeccdde8bc..022fe9d0f8 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -470,12 +470,9 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
     tcg_target_ulong regs[TCG_TARGET_NB_REGS];
     uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                    / sizeof(uint64_t)];
-    void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];
 
     regs[TCG_AREG0] = (tcg_target_ulong)env;
     regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
-    /* Other call_slots entries initialized at first use (see below). */
-    call_slots[0] = NULL;
     tci_assert(tb_ptr);
 
     for (;;) {
@@ -498,26 +495,26 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 
         switch (opc) {
         case INDEX_op_call:
-            /*
-             * Set up the ffi_avalue array once, delayed until now
-             * because many TB's do not make any calls. In tcg_gen_callN,
-             * we arranged for every real argument to be "left-aligned"
-             * in each 64-bit slot.
-             */
-            if (unlikely(call_slots[0] == NULL)) {
-                for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
-                    call_slots[i] = &stack[i];
+            {
+                void *call_slots[MAX_CALL_IARGS];
+                ffi_cif *cif;
+                void *func;
+                unsigned i, s, n;
+
+                tci_args_nl(insn, tb_ptr, &len, &ptr);
+                func = ((void **)ptr)[0];
+                cif = ((void **)ptr)[1];
+
+                n = cif->nargs;
+                for (i = s = 0; i < n; ++i) {
+                    ffi_type *t = cif->arg_types[i];
+                    call_slots[i] = &stack[s];
+                    s += DIV_ROUND_UP(t->size, 8);
                 }
-            }
-
-            tci_args_nl(insn, tb_ptr, &len, &ptr);
-
-            /* Helper functions may need to access the "return address" */
-            tci_tb_ptr = (uintptr_t)tb_ptr;
-
-            {
-                void **pptr = ptr;
-                ffi_call(pptr[1], pptr[0], stack, call_slots);
+
+                /* Helper functions may need to access the "return address" */
+                tci_tb_ptr = (uintptr_t)tb_ptr;
+                ffi_call(cif, func, stack, call_slots);
             }
 
             switch (len) {
@@ -542,6 +539,9 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                  */
                 memcpy(&regs[TCG_REG_R0], stack, 8);
                 break;
+            case 3: /* Int128 */
+                memcpy(&regs[TCG_REG_R0], stack, 16);
+                break;
             default:
                 g_assert_not_reached();
             }