author | Peter Maydell <peter.maydell@linaro.org> | 2020-01-27 13:02:36 +0000
---|---|---
committer | Peter Maydell <peter.maydell@linaro.org> | 2020-01-27 13:02:36 +0000
commit | 105b07f1ba462ec48b27e5cb74ddf81c6a79364c (patch) |
tree | b52e9840a334833a45da239ca8b6f9902f3217d6 |
parent | 760df0d121a836dcbf3726b80b820115aef21b30 (diff) |
parent | 4103500e2fa934a6995e4cedab37423e606715bf (diff) |
Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20200127' into staging
s390x changes:
- kvm: re-enable adapter interrupt suppression (AIS)
- fixes and cleanups
# gpg: Signature made Mon 27 Jan 2020 12:14:12 GMT
# gpg: using RSA key C3D0D66DC3624FF6A8C018CEDECF6B93C6F02FAF
# gpg: issuer "cohuck@redhat.com"
# gpg: Good signature from "Cornelia Huck <conny@cornelia-huck.de>" [marginal]
# gpg: aka "Cornelia Huck <huckc@linux.vnet.ibm.com>" [full]
# gpg: aka "Cornelia Huck <cornelia.huck@de.ibm.com>" [full]
# gpg: aka "Cornelia Huck <cohuck@kernel.org>" [marginal]
# gpg: aka "Cornelia Huck <cohuck@redhat.com>" [marginal]
# Primary key fingerprint: C3D0 D66D C362 4FF6 A8C0 18CE DECF 6B93 C6F0 2FAF
* remotes/cohuck/tags/s390x-20200127:
s390x: sigp: Fix sense running reporting
hw/s390x: Add a more verbose comment about get_machine_class() and the wrappers
target/s390x: Remove DisasFields argument from extract_insn
target/s390x: Move DisasFields into DisasContext
target/s390x: Pass DisasContext to get_field and have_field
target/s390x: Remove DisasFields argument from callbacks
target/s390x: Move struct DisasFields definition earlier
target/s390x/kvm: Enable adapter interruption suppression again
docs/devel: fix stable process doc formatting
target/s390x: Remove duplicated ifdef macro
s390x/event-facility: fix error propagation
s390x: adapter routes error handling
s390x/event-facility.c: remove unneeded labels
intc/s390_flic_kvm.c: remove unneeded label in kvm_flic_load()
s390x/sclp.c: remove unneeded label in sclp_service_call()
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r-- | docs/devel/stable-process.rst | 6
-rw-r--r-- | hw/intc/s390_flic_kvm.c | 18
-rw-r--r-- | hw/s390x/event-facility.c | 35
-rw-r--r-- | hw/s390x/s390-virtio-ccw.c | 16
-rw-r--r-- | hw/s390x/sclp.c | 16
-rw-r--r-- | hw/s390x/virtio-ccw.c | 4
-rw-r--r-- | target/s390x/excp_helper.c | 7
-rw-r--r-- | target/s390x/kvm.c | 9
-rw-r--r-- | target/s390x/sigp.c | 4
-rw-r--r-- | target/s390x/translate.c | 982
-rw-r--r-- | target/s390x/translate_vx.inc.c | 649
11 files changed, 874 insertions, 872 deletions
diff --git a/docs/devel/stable-process.rst b/docs/devel/stable-process.rst
index 98736a9ea4..e541b983fa 100644
--- a/docs/devel/stable-process.rst
+++ b/docs/devel/stable-process.rst
@@ -18,8 +18,10 @@ What should go into a stable release?
 -------------------------------------
 
 Generally, the following patches are considered stable material:
-- Patches that fix severe issues, like fixes for CVEs
-- Patches that fix regressions
+
+* Patches that fix severe issues, like fixes for CVEs
+
+* Patches that fix regressions
 
 If you think the patch would be important for users of the current release
 (or for a distribution picking fixes), it is usually a good candidate
diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c
index dddd33ea61..a306b26faa 100644
--- a/hw/intc/s390_flic_kvm.c
+++ b/hw/intc/s390_flic_kvm.c
@@ -331,6 +331,10 @@ static int kvm_s390_add_adapter_routes(S390FLICState *fs,
     int ret, i;
     uint64_t ind_offset = routes->adapter.ind_offset;
 
+    if (!kvm_gsi_routing_enabled()) {
+        return -ENOSYS;
+    }
+
     for (i = 0; i < routes->num_routes; i++) {
         ret = kvm_irqchip_add_adapter_route(kvm_state, &routes->adapter);
         if (ret < 0) {
@@ -358,6 +362,10 @@ static void kvm_s390_release_adapter_routes(S390FLICState *fs,
 {
     int i;
 
+    if (!kvm_gsi_routing_enabled()) {
+        return;
+    }
+
     for (i = 0; i < routes->num_routes; i++) {
         if (routes->gsi[i] >= 0) {
             kvm_irqchip_release_virq(kvm_state, routes->gsi[i]);
@@ -439,17 +447,14 @@ static int kvm_flic_load(QEMUFile *f, void *opaque, size_t size,
     count = qemu_get_be64(f);
     len = count * sizeof(struct kvm_s390_irq);
     if (count == FLIC_FAILED) {
-        r = -EINVAL;
-        goto out;
+        return -EINVAL;
     }
     if (count == 0) {
-        r = 0;
-        goto out;
+        return 0;
     }
     buf = g_try_malloc0(len);
     if (!buf) {
-        r = -ENOMEM;
-        goto out;
+        return -ENOMEM;
     }
 
     if (qemu_get_buffer(f, (uint8_t *) buf, len) != len) {
@@ -460,7 +465,6 @@ static int kvm_flic_load(QEMUFile *f, void *opaque, size_t size,
 
 out_free:
     g_free(buf);
-out:
     return r;
 }
 
diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c
index 6afe278cad..9d6972afa8 100644
--- a/hw/s390x/event-facility.c
+++ b/hw/s390x/event-facility.c
@@ -182,11 +182,11 @@ static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
 {
     if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) {
         sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
-        goto out;
+        return;
     }
     if (be16_to_cpu(sccb->h.length) < 8) {
         sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
-        goto out;
+        return;
     }
     /* first do a sanity check of the write events */
     sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb));
@@ -196,9 +196,6 @@ static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
         sccb->h.response_code =
             cpu_to_be16(handle_sccb_write_events(ef, sccb));
     }
-
-out:
-    return;
 }
 
 static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb,
@@ -262,17 +259,18 @@ static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
     if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) {
         sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
-        goto out;
+        return;
     }
 
-    sclp_cp_receive_mask = ef->receive_mask;
-
-    /* get active selection mask */
     switch (sccb->h.function_code) {
     case SCLP_UNCONDITIONAL_READ:
-        sclp_active_selection_mask = sclp_cp_receive_mask;
+        sccb->h.response_code = cpu_to_be16(
+            handle_sccb_read_events(ef, sccb, ef->receive_mask));
         break;
     case SCLP_SELECTIVE_READ:
+        /* get active selection mask */
+        sclp_cp_receive_mask = ef->receive_mask;
+
         copy_mask((uint8_t *)&sclp_active_selection_mask,
(uint8_t *)&red->mask, sizeof(sclp_active_selection_mask), ef->mask_length); sclp_active_selection_mask = be64_to_cpu(sclp_active_selection_mask); @@ -280,18 +278,14 @@ static void read_event_data(SCLPEventFacility *ef, SCCB *sccb) (sclp_active_selection_mask & ~sclp_cp_receive_mask)) { sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK); - goto out; + } else { + sccb->h.response_code = cpu_to_be16( + handle_sccb_read_events(ef, sccb, sclp_active_selection_mask)); } break; default: sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION); - goto out; } - sccb->h.response_code = cpu_to_be16( - handle_sccb_read_events(ef, sccb, sclp_active_selection_mask)); - -out: - return; } static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb) @@ -303,7 +297,7 @@ static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb) if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) || ((mask_length != 4) && !ef->allow_all_mask_sizes)) { sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH); - goto out; + return; } /* @@ -328,9 +322,6 @@ static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb) sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); ef->mask_length = mask_length; - -out: - return; } /* qemu object creation and initialization functions */ @@ -347,7 +338,7 @@ static void sclp_events_bus_realize(BusState *bus, Error **errp) DeviceState *dev = kid->child; object_property_set_bool(OBJECT(dev), true, "realized", &err); - if (errp) { + if (err) { error_propagate(errp, err); return; } diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index e0e28139a2..e759eb5f83 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -505,6 +505,19 @@ static inline void machine_set_dea_key_wrap(Object *obj, bool value, static S390CcwMachineClass *current_mc; +/* + * Get the class of the s390-ccw-virtio machine that is currently in use. + * Note: libvirt is using the "none" machine to probe for the features of the + * host CPU, so in case this is called with the "none" machine, the function + * returns the TYPE_S390_CCW_MACHINE base class. In this base class, all the + * various "*_allowed" variables are enabled, so that the *_allowed() wrappers + * below return the correct default value for the "none" machine. + * + * Attention! Do *not* add additional new wrappers for CPU features (e.g. like + * the ri_allowed() wrapper) via this mechanism anymore. CPU features should + * be handled via the CPU models, i.e. checking with cpu_model_allowed() during + * CPU initialization and s390_has_feat() later should be sufficient. 
+ */ static S390CcwMachineClass *get_machine_class(void) { if (unlikely(!current_mc)) { @@ -521,19 +534,16 @@ static S390CcwMachineClass *get_machine_class(void) bool ri_allowed(void) { - /* for "none" machine this results in true */ return get_machine_class()->ri_allowed; } bool cpu_model_allowed(void) { - /* for "none" machine this results in true */ return get_machine_class()->cpu_model_allowed; } bool hpage_1m_allowed(void) { - /* for "none" machine this results in true */ return get_machine_class()->hpage_1m_allowed; } diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c index f57ce7b739..af0bfbc2ec 100644 --- a/hw/s390x/sclp.c +++ b/hw/s390x/sclp.c @@ -197,24 +197,20 @@ int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code) { SCLPDevice *sclp = get_sclp_device(); SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp); - int r = 0; SCCB work_sccb; hwaddr sccb_len = sizeof(SCCB); /* first some basic checks on program checks */ if (env->psw.mask & PSW_MASK_PSTATE) { - r = -PGM_PRIVILEGED; - goto out; + return -PGM_PRIVILEGED; } if (cpu_physical_memory_is_io(sccb)) { - r = -PGM_ADDRESSING; - goto out; + return -PGM_ADDRESSING; } if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa || (sccb & ~0x7ffffff8UL) != 0) { - r = -PGM_SPECIFICATION; - goto out; + return -PGM_SPECIFICATION; } /* @@ -226,8 +222,7 @@ int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code) /* Valid sccb sizes */ if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader)) { - r = -PGM_SPECIFICATION; - goto out; + return -PGM_SPECIFICATION; } switch (code & SCLP_CMD_CODE_MASK) { @@ -257,8 +252,7 @@ out_write: sclp_c->service_interrupt(sclp, sccb); -out: - return r; + return 0; } static void service_interrupt(SCLPDevice *sclp, uint32_t sccb) diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 6580ce5907..13f57e7b67 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -697,6 +697,7 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); SubchDev *sch; Error *err = NULL; + int i; sch = css_create_sch(ccw_dev->devno, errp); if (!sch) { @@ -717,6 +718,9 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) ccw_dev->sch = sch; dev->indicators = NULL; dev->revision = -1; + for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) { + dev->routes.gsi[i] = -1; + } css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE); trace_virtio_ccw_new_device( diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c index e70c20d363..1e9d6f20c1 100644 --- a/target/s390x/excp_helper.c +++ b/target/s390x/excp_helper.c @@ -305,15 +305,14 @@ static void do_ext_interrupt(CPUS390XState *env) if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) && (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) { + MachineState *ms = MACHINE(qdev_get_machine()); + unsigned int max_cpus = ms->smp.max_cpus; + lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY); cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS); g_assert(cpu_addr < S390_MAX_CPUS); lowcore->cpu_addr = cpu_to_be16(cpu_addr); clear_bit(cpu_addr, env->emergency_signals); -#ifndef CONFIG_USER_ONLY - MachineState *ms = MACHINE(qdev_get_machine()); - unsigned int max_cpus = ms->smp.max_cpus; -#endif if (bitmap_empty(env->emergency_signals, max_cpus)) { env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL; } diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c index 15260aeb9a..30112e529c 100644 --- a/target/s390x/kvm.c +++ b/target/s390x/kvm.c @@ -365,10 
+365,13 @@ int kvm_arch_init(MachineState *ms, KVMState *s) /* * The migration interface for ais was introduced with kernel 4.13 * but the capability itself had been active since 4.12. As migration - * support is considered necessary let's disable ais in the 2.10 - * machine. + * support is considered necessary, we only try to enable this for + * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available. */ - /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */ + if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() && + kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) { + kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); + } kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES); return 0; diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c index 727875bb4a..c604f17710 100644 --- a/target/s390x/sigp.c +++ b/target/s390x/sigp.c @@ -348,9 +348,9 @@ static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si) /* If halted (which includes also STOPPED), it is not running */ if (CPU(dst_cpu)->halted) { - si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; - } else { set_sigp_status(si, SIGP_STAT_NOT_RUNNING); + } else { + si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } } diff --git a/target/s390x/translate.c b/target/s390x/translate.c index b764ec3140..0bd2073718 100644 --- a/target/s390x/translate.c +++ b/target/s390x/translate.c @@ -53,10 +53,93 @@ typedef struct DisasContext DisasContext; typedef struct DisasInsn DisasInsn; typedef struct DisasFields DisasFields; +/* + * Define a structure to hold the decoded fields. We'll store each inside + * an array indexed by an enum. In order to conserve memory, we'll arrange + * for fields that do not exist at the same time to overlap, thus the "C" + * for compact. For checking purposes there is an "O" for original index + * as well that will be applied to availability bitmaps. + */ + +enum DisasFieldIndexO { + FLD_O_r1, + FLD_O_r2, + FLD_O_r3, + FLD_O_m1, + FLD_O_m3, + FLD_O_m4, + FLD_O_m5, + FLD_O_m6, + FLD_O_b1, + FLD_O_b2, + FLD_O_b4, + FLD_O_d1, + FLD_O_d2, + FLD_O_d4, + FLD_O_x2, + FLD_O_l1, + FLD_O_l2, + FLD_O_i1, + FLD_O_i2, + FLD_O_i3, + FLD_O_i4, + FLD_O_i5, + FLD_O_v1, + FLD_O_v2, + FLD_O_v3, + FLD_O_v4, +}; + +enum DisasFieldIndexC { + FLD_C_r1 = 0, + FLD_C_m1 = 0, + FLD_C_b1 = 0, + FLD_C_i1 = 0, + FLD_C_v1 = 0, + + FLD_C_r2 = 1, + FLD_C_b2 = 1, + FLD_C_i2 = 1, + + FLD_C_r3 = 2, + FLD_C_m3 = 2, + FLD_C_i3 = 2, + FLD_C_v3 = 2, + + FLD_C_m4 = 3, + FLD_C_b4 = 3, + FLD_C_i4 = 3, + FLD_C_l1 = 3, + FLD_C_v4 = 3, + + FLD_C_i5 = 4, + FLD_C_d1 = 4, + FLD_C_m5 = 4, + + FLD_C_d2 = 5, + FLD_C_m6 = 5, + + FLD_C_d4 = 6, + FLD_C_x2 = 6, + FLD_C_l2 = 6, + FLD_C_v2 = 6, + + NUM_C_FIELD = 7 +}; + +struct DisasFields { + uint64_t raw_insn; + unsigned op:8; + unsigned op2:8; + unsigned presentC:16; + unsigned int presentO; + int c[NUM_C_FIELD]; +}; + struct DisasContext { DisasContextBase base; const DisasInsn *insn; - DisasFields *fields; + DisasFields fields; uint64_t ex_value; /* * During translate_one(), pc_tmp is used to determine the instruction @@ -1005,101 +1088,20 @@ typedef enum { #undef F5 #undef F6 -/* Define a structure to hold the decoded fields. We'll store each inside - an array indexed by an enum. In order to conserve memory, we'll arrange - for fields that do not exist at the same time to overlap, thus the "C" - for compact. For checking purposes there is an "O" for original index - as well that will be applied to availability bitmaps. 
*/ - -enum DisasFieldIndexO { - FLD_O_r1, - FLD_O_r2, - FLD_O_r3, - FLD_O_m1, - FLD_O_m3, - FLD_O_m4, - FLD_O_m5, - FLD_O_m6, - FLD_O_b1, - FLD_O_b2, - FLD_O_b4, - FLD_O_d1, - FLD_O_d2, - FLD_O_d4, - FLD_O_x2, - FLD_O_l1, - FLD_O_l2, - FLD_O_i1, - FLD_O_i2, - FLD_O_i3, - FLD_O_i4, - FLD_O_i5, - FLD_O_v1, - FLD_O_v2, - FLD_O_v3, - FLD_O_v4, -}; - -enum DisasFieldIndexC { - FLD_C_r1 = 0, - FLD_C_m1 = 0, - FLD_C_b1 = 0, - FLD_C_i1 = 0, - FLD_C_v1 = 0, - - FLD_C_r2 = 1, - FLD_C_b2 = 1, - FLD_C_i2 = 1, - - FLD_C_r3 = 2, - FLD_C_m3 = 2, - FLD_C_i3 = 2, - FLD_C_v3 = 2, - - FLD_C_m4 = 3, - FLD_C_b4 = 3, - FLD_C_i4 = 3, - FLD_C_l1 = 3, - FLD_C_v4 = 3, - - FLD_C_i5 = 4, - FLD_C_d1 = 4, - FLD_C_m5 = 4, - - FLD_C_d2 = 5, - FLD_C_m6 = 5, - - FLD_C_d4 = 6, - FLD_C_x2 = 6, - FLD_C_l2 = 6, - FLD_C_v2 = 6, - - NUM_C_FIELD = 7 -}; - -struct DisasFields { - uint64_t raw_insn; - unsigned op:8; - unsigned op2:8; - unsigned presentC:16; - unsigned int presentO; - int c[NUM_C_FIELD]; -}; - /* This is the way fields are to be accessed out of DisasFields. */ #define have_field(S, F) have_field1((S), FLD_O_##F) #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F) -static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c) +static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c) { - return (f->presentO >> c) & 1; + return (s->fields.presentO >> c) & 1; } -static int get_field1(const DisasFields *f, enum DisasFieldIndexO o, +static int get_field1(const DisasContext *s, enum DisasFieldIndexO o, enum DisasFieldIndexC c) { - assert(have_field1(f, o)); - return f->c[c]; + assert(have_field1(s, o)); + return s->fields.c[c]; } /* Describe the layout of each field in each format. */ @@ -1221,15 +1223,15 @@ struct DisasInsn { const char *name; /* Pre-process arguments before HELP_OP. */ - void (*help_in1)(DisasContext *, DisasFields *, DisasOps *); - void (*help_in2)(DisasContext *, DisasFields *, DisasOps *); - void (*help_prep)(DisasContext *, DisasFields *, DisasOps *); + void (*help_in1)(DisasContext *, DisasOps *); + void (*help_in2)(DisasContext *, DisasOps *); + void (*help_prep)(DisasContext *, DisasOps *); /* * Post-process output after HELP_OP. * Note that these are not called if HELP_OP returns DISAS_NORETURN. */ - void (*help_wout)(DisasContext *, DisasFields *, DisasOps *); + void (*help_wout)(DisasContext *, DisasOps *); void (*help_cout)(DisasContext *, DisasOps *); /* Implement the operation itself. */ @@ -1241,11 +1243,10 @@ struct DisasInsn { /* ====================================================================== */ /* Miscellaneous helpers, used by several operations. */ -static void help_l2_shift(DisasContext *s, DisasFields *f, - DisasOps *o, int mask) +static void help_l2_shift(DisasContext *s, DisasOps *o, int mask) { - int b2 = get_field(f, b2); - int d2 = get_field(f, d2); + int b2 = get_field(s, b2); + int d2 = get_field(s, d2); if (b2 == 0) { o->in2 = tcg_const_i64(d2 & mask); @@ -1600,18 +1601,18 @@ static DisasJumpType op_bal(DisasContext *s, DisasOps *o) static DisasJumpType op_basi(DisasContext *s, DisasOps *o) { pc_to_link_info(o->out, s, s->pc_tmp); - return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2)); + return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2)); } static DisasJumpType op_bc(DisasContext *s, DisasOps *o) { - int m1 = get_field(s->fields, m1); - bool is_imm = have_field(s->fields, i2); - int imm = is_imm ? 
get_field(s->fields, i2) : 0; + int m1 = get_field(s, m1); + bool is_imm = have_field(s, i2); + int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; /* BCR with R2 = 0 causes no branching */ - if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) { + if (have_field(s, r2) && get_field(s, r2) == 0) { if (m1 == 14) { /* Perform serialization */ /* FIXME: check for fast-BCR-serialization facility */ @@ -1631,9 +1632,9 @@ static DisasJumpType op_bc(DisasContext *s, DisasOps *o) static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - bool is_imm = have_field(s->fields, i2); - int imm = is_imm ? get_field(s->fields, i2) : 0; + int r1 = get_field(s, r1); + bool is_imm = have_field(s, i2); + int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; TCGv_i64 t; @@ -1655,8 +1656,8 @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int imm = get_field(s->fields, i2); + int r1 = get_field(s, r1); + int imm = get_field(s, i2); DisasCompare c; TCGv_i64 t; @@ -1679,9 +1680,9 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - bool is_imm = have_field(s->fields, i2); - int imm = is_imm ? get_field(s->fields, i2) : 0; + int r1 = get_field(s, r1); + bool is_imm = have_field(s, i2); + int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; c.cond = TCG_COND_NE; @@ -1698,10 +1699,10 @@ static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); - bool is_imm = have_field(s->fields, i2); - int imm = is_imm ? get_field(s->fields, i2) : 0; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + bool is_imm = have_field(s, i2); + int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; TCGv_i64 t; @@ -1724,10 +1725,10 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); - bool is_imm = have_field(s->fields, i2); - int imm = is_imm ? get_field(s->fields, i2) : 0; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + bool is_imm = have_field(s, i2); + int imm = is_imm ? get_field(s, i2) : 0; DisasCompare c; c.cond = (s->insn->data ? 
TCG_COND_LE : TCG_COND_GT); @@ -1750,7 +1751,7 @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) static DisasJumpType op_cj(DisasContext *s, DisasOps *o) { - int imm, m3 = get_field(s->fields, m3); + int imm, m3 = get_field(s, m3); bool is_imm; DisasCompare c; @@ -1762,13 +1763,13 @@ static DisasJumpType op_cj(DisasContext *s, DisasOps *o) c.u.s64.a = o->in1; c.u.s64.b = o->in2; - is_imm = have_field(s->fields, i4); + is_imm = have_field(s, i4); if (is_imm) { - imm = get_field(s->fields, i4); + imm = get_field(s, i4); } else { imm = 0; - o->out = get_address(s, 0, get_field(s->fields, b4), - get_field(s->fields, d4)); + o->out = get_address(s, 0, get_field(s, b4), + get_field(s, d4)); } return help_branch(s, &c, is_imm, imm, o->out); @@ -1799,8 +1800,8 @@ static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe, bool m4_with_fpe) { const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT); - uint8_t m3 = get_field(s->fields, m3); - uint8_t m4 = get_field(s->fields, m4); + uint8_t m3 = get_field(s, m3); + uint8_t m4 = get_field(s, m4); /* m3 field was introduced with FPE */ if (!fpe && m3_with_fpe) { @@ -2052,7 +2053,7 @@ static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o) static DisasJumpType op_cksm(DisasContext *s, DisasOps *o) { - int r2 = get_field(s->fields, r2); + int r2 = get_field(s, r2); TCGv_i64 len = tcg_temp_new_i64(); gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]); @@ -2068,7 +2069,7 @@ static DisasJumpType op_cksm(DisasContext *s, DisasOps *o) static DisasJumpType op_clc(DisasContext *s, DisasOps *o) { - int l = get_field(s->fields, l1); + int l = get_field(s, l1); TCGv_i32 vl; switch (l + 1) { @@ -2101,8 +2102,8 @@ static DisasJumpType op_clc(DisasContext *s, DisasOps *o) static DisasJumpType op_clcl(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r2 = get_field(s->fields, r2); + int r1 = get_field(s, r1); + int r2 = get_field(s, r2); TCGv_i32 t1, t2; /* r1 and r2 must be even. */ @@ -2122,8 +2123,8 @@ static DisasJumpType op_clcl(DisasContext *s, DisasOps *o) static DisasJumpType op_clcle(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); TCGv_i32 t1, t3; /* r1 and r3 must be even. */ @@ -2143,8 +2144,8 @@ static DisasJumpType op_clcle(DisasContext *s, DisasOps *o) static DisasJumpType op_clclu(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); TCGv_i32 t1, t3; /* r1 and r3 must be even. 
*/ @@ -2164,7 +2165,7 @@ static DisasJumpType op_clclu(DisasContext *s, DisasOps *o) static DisasJumpType op_clm(DisasContext *s, DisasOps *o) { - TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3)); + TCGv_i32 m3 = tcg_const_i32(get_field(s, m3)); TCGv_i32 t1 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t1, o->in1); gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2); @@ -2194,8 +2195,8 @@ static DisasJumpType op_cps(DisasContext *s, DisasOps *o) static DisasJumpType op_cs(DisasContext *s, DisasOps *o) { - int d2 = get_field(s->fields, d2); - int b2 = get_field(s->fields, b2); + int d2 = get_field(s, d2); + int b2 = get_field(s, b2); TCGv_i64 addr, cc; /* Note that in1 = R3 (new value) and @@ -2219,10 +2220,10 @@ static DisasJumpType op_cs(DisasContext *s, DisasOps *o) static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); - int d2 = get_field(s->fields, d2); - int b2 = get_field(s->fields, b2); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + int d2 = get_field(s, d2); + int b2 = get_field(s, b2); DisasJumpType ret = DISAS_NEXT; TCGv_i64 addr; TCGv_i32 t_r1, t_r3; @@ -2249,7 +2250,7 @@ static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o) static DisasJumpType op_csst(DisasContext *s, DisasOps *o) { - int r3 = get_field(s->fields, r3); + int r3 = get_field(s, r3); TCGv_i32 t_r3 = tcg_const_i32(r3); if (tb_cflags(s->base.tb) & CF_PARALLEL) { @@ -2322,7 +2323,7 @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) static DisasJumpType op_ct(DisasContext *s, DisasOps *o) { - int m3 = get_field(s->fields, m3); + int m3 = get_field(s, m3); TCGLabel *lab = gen_new_label(); TCGCond c; @@ -2341,9 +2342,9 @@ static DisasJumpType op_ct(DisasContext *s, DisasOps *o) static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o) { - int m3 = get_field(s->fields, m3); - int r1 = get_field(s->fields, r1); - int r2 = get_field(s->fields, r2); + int m3 = get_field(s, m3); + int r1 = get_field(s, r1); + int r2 = get_field(s, r2); TCGv_i32 tr1, tr2, chk; /* R1 and R2 must both be even. 
*/ @@ -2392,9 +2393,9 @@ static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_diag(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); - TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); + TCGv_i32 func_code = tcg_const_i32(get_field(s, i2)); gen_helper_diag(cpu_env, r1, r3, func_code); @@ -2454,7 +2455,7 @@ static DisasJumpType op_dxb(DisasContext *s, DisasOps *o) static DisasJumpType op_ear(DisasContext *s, DisasOps *o) { - int r2 = get_field(s->fields, r2); + int r2 = get_field(s, r2); tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2])); return DISAS_NEXT; } @@ -2474,8 +2475,8 @@ static DisasJumpType op_efpc(DisasContext *s, DisasOps *o) static DisasJumpType op_epsw(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r2 = get_field(s->fields, r2); + int r1 = get_field(s, r1); + int r2 = get_field(s, r2); TCGv_i64 t = tcg_temp_new_i64(); /* Note the "subsequently" in the PoO, which implies a defined result @@ -2492,7 +2493,7 @@ static DisasJumpType op_epsw(DisasContext *s, DisasOps *o) static DisasJumpType op_ex(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); + int r1 = get_field(s, r1); TCGv_i32 ilen; TCGv_i64 v1; @@ -2581,7 +2582,7 @@ static DisasJumpType op_flogr(DisasContext *s, DisasOps *o) static DisasJumpType op_icm(DisasContext *s, DisasOps *o) { - int m3 = get_field(s->fields, m3); + int m3 = get_field(s, m3); int pos, len, base = s->insn->data; TCGv_i64 tmp = tcg_temp_new_i64(); uint64_t ccm; @@ -2669,7 +2670,7 @@ static DisasJumpType op_idte(DisasContext *s, DisasOps *o) TCGv_i32 m4; if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) { - m4 = tcg_const_i32(get_field(s->fields, m4)); + m4 = tcg_const_i32(get_field(s, m4)); } else { m4 = tcg_const_i32(0); } @@ -2683,7 +2684,7 @@ static DisasJumpType op_ipte(DisasContext *s, DisasOps *o) TCGv_i32 m4; if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) { - m4 = tcg_const_i32(get_field(s->fields, m4)); + m4 = tcg_const_i32(get_field(s, m4)); } else { m4 = tcg_const_i32(0); } @@ -2701,9 +2702,9 @@ static DisasJumpType op_iske(DisasContext *s, DisasOps *o) static DisasJumpType op_msa(DisasContext *s, DisasOps *o) { - int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0; - int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0; - int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0; + int r1 = have_field(s, r1) ? get_field(s, r1) : 0; + int r2 = have_field(s, r2) ? get_field(s, r2) : 0; + int r3 = have_field(s, r3) ? get_field(s, r3) : 0; TCGv_i32 t_r1, t_r2, t_r3, type; switch (s->insn->data) { @@ -2929,7 +2930,7 @@ static DisasJumpType op_ld64(DisasContext *s, DisasOps *o) static DisasJumpType op_lat(DisasContext *s, DisasOps *o) { TCGLabel *lab = gen_new_label(); - store_reg32_i64(get_field(s->fields, r1), o->in2); + store_reg32_i64(get_field(s, r1), o->in2); /* The value is stored even in case of trap. */ tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab); gen_trap(s); @@ -2951,7 +2952,7 @@ static DisasJumpType op_lgat(DisasContext *s, DisasOps *o) static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o) { TCGLabel *lab = gen_new_label(); - store_reg32h_i64(get_field(s->fields, r1), o->in2); + store_reg32h_i64(get_field(s, r1), o->in2); /* The value is stored even in case of trap. 
*/ tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab); gen_trap(s); @@ -2985,7 +2986,7 @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o) { DisasCompare c; - disas_jcc(s, &c, get_field(s->fields, m3)); + disas_jcc(s, &c, get_field(s, m3)); if (c.is_64) { tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b, @@ -3014,8 +3015,8 @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_lctl(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); gen_helper_lctl(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -3025,8 +3026,8 @@ static DisasJumpType op_lctl(DisasContext *s, DisasOps *o) static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); gen_helper_lctlg(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -3088,8 +3089,8 @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o) static DisasJumpType op_lam(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); gen_helper_lam(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -3098,8 +3099,8 @@ static DisasJumpType op_lam(DisasContext *s, DisasOps *o) static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); TCGv_i64 t1, t2; /* Only one register to read. */ @@ -3144,8 +3145,8 @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); TCGv_i64 t1, t2; /* Only one register to read. */ @@ -3190,8 +3191,8 @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); TCGv_i64 t1, t2; /* Only one register to read. */ @@ -3243,8 +3244,8 @@ static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) } /* In a serial context, perform the two loads ... 
*/ - a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1)); - a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2)); + a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); + a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2)); tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN); tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN); tcg_temp_free_i64(a1); @@ -3272,7 +3273,7 @@ static DisasJumpType op_lpq(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_lura(DisasContext *s, DisasOps *o) { - o->addr1 = get_address(s, 0, get_field(s->fields, r2), 0); + o->addr1 = get_address(s, 0, get_field(s, r2), 0); tcg_gen_qemu_ld_tl(o->out, o->addr1, MMU_REAL_IDX, s->insn->data); return DISAS_NEXT; } @@ -3286,9 +3287,9 @@ static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o) static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o) { - const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6)); + const int64_t block_size = (1ull << (get_field(s, m3) + 6)); - if (get_field(s->fields, m3) > 6) { + if (get_field(s, m3) > 6) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } @@ -3312,7 +3313,7 @@ static DisasJumpType op_mov2(DisasContext *s, DisasOps *o) static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) { - int b2 = get_field(s->fields, b2); + int b2 = get_field(s, b2); TCGv ar1 = tcg_temp_new_i64(); o->out = o->in2; @@ -3359,7 +3360,7 @@ static DisasJumpType op_movx(DisasContext *s, DisasOps *o) static DisasJumpType op_mvc(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_mvc(cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); return DISAS_NEXT; @@ -3367,7 +3368,7 @@ static DisasJumpType op_mvc(DisasContext *s, DisasOps *o) static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_mvcin(cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); return DISAS_NEXT; @@ -3375,8 +3376,8 @@ static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o) static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r2 = get_field(s->fields, r2); + int r1 = get_field(s, r1); + int r2 = get_field(s, r2); TCGv_i32 t1, t2; /* r1 and r2 must be even. */ @@ -3396,8 +3397,8 @@ static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o) static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); TCGv_i32 t1, t3; /* r1 and r3 must be even. */ @@ -3417,8 +3418,8 @@ static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o) static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); TCGv_i32 t1, t3; /* r1 and r3 must be even. 
*/ @@ -3438,7 +3439,7 @@ static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o) static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o) { - int r3 = get_field(s->fields, r3); + int r3 = get_field(s, r3); gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]); set_cc_static(s); return DISAS_NEXT; @@ -3447,7 +3448,7 @@ static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, l1); + int r1 = get_field(s, l1); gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; @@ -3455,7 +3456,7 @@ static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, l1); + int r1 = get_field(s, l1); gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; @@ -3464,7 +3465,7 @@ static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o) static DisasJumpType op_mvn(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_mvn(cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); return DISAS_NEXT; @@ -3472,7 +3473,7 @@ static DisasJumpType op_mvn(DisasContext *s, DisasOps *o) static DisasJumpType op_mvo(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_mvo(cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); return DISAS_NEXT; @@ -3487,8 +3488,8 @@ static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o) static DisasJumpType op_mvst(DisasContext *s, DisasOps *o) { - TCGv_i32 t1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 t2 = tcg_const_i32(get_field(s->fields, r2)); + TCGv_i32 t1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 t2 = tcg_const_i32(get_field(s, r2)); gen_helper_mvst(cc_op, cpu_env, t1, t2); tcg_temp_free_i32(t1); @@ -3499,7 +3500,7 @@ static DisasJumpType op_mvst(DisasContext *s, DisasOps *o) static DisasJumpType op_mvz(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_mvz(cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); return DISAS_NEXT; @@ -3551,7 +3552,7 @@ static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o) static DisasJumpType op_maeb(DisasContext *s, DisasOps *o) { - TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3)); + TCGv_i64 r3 = load_freg32_i64(get_field(s, r3)); gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3); tcg_temp_free_i64(r3); return DISAS_NEXT; @@ -3559,7 +3560,7 @@ static DisasJumpType op_maeb(DisasContext *s, DisasOps *o) static DisasJumpType op_madb(DisasContext *s, DisasOps *o) { - TCGv_i64 r3 = load_freg(get_field(s->fields, r3)); + TCGv_i64 r3 = load_freg(get_field(s, r3)); gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3); tcg_temp_free_i64(r3); return DISAS_NEXT; @@ -3567,7 +3568,7 @@ static DisasJumpType op_madb(DisasContext *s, DisasOps *o) static DisasJumpType op_mseb(DisasContext *s, DisasOps *o) { - TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3)); + TCGv_i64 r3 = load_freg32_i64(get_field(s, r3)); gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3); tcg_temp_free_i64(r3); return DISAS_NEXT; @@ -3575,7 +3576,7 @@ static DisasJumpType op_mseb(DisasContext *s, DisasOps *o) static DisasJumpType op_msdb(DisasContext *s, DisasOps *o) { - TCGv_i64 r3 = 
load_freg(get_field(s->fields, r3)); + TCGv_i64 r3 = load_freg(get_field(s, r3)); gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3); tcg_temp_free_i64(r3); return DISAS_NEXT; @@ -3614,7 +3615,7 @@ static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o) static DisasJumpType op_nc(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); set_cc_static(s); @@ -3648,7 +3649,7 @@ static DisasJumpType op_negf128(DisasContext *s, DisasOps *o) static DisasJumpType op_oc(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); set_cc_static(s); @@ -3700,7 +3701,7 @@ static DisasJumpType op_oi(DisasContext *s, DisasOps *o) static DisasJumpType op_pack(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_pack(cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); return DISAS_NEXT; @@ -3708,7 +3709,7 @@ static DisasJumpType op_pack(DisasContext *s, DisasOps *o) static DisasJumpType op_pka(DisasContext *s, DisasOps *o) { - int l2 = get_field(s->fields, l2) + 1; + int l2 = get_field(s, l2) + 1; TCGv_i32 l; /* The length must not exceed 32 bytes. */ @@ -3724,7 +3725,7 @@ static DisasJumpType op_pka(DisasContext *s, DisasOps *o) static DisasJumpType op_pku(DisasContext *s, DisasOps *o) { - int l2 = get_field(s->fields, l2) + 1; + int l2 = get_field(s, l2) + 1; TCGv_i32 l; /* The length must be even and should not exceed 64 bytes. */ @@ -3754,15 +3755,15 @@ static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o) static DisasJumpType op_risbg(DisasContext *s, DisasOps *o) { - int i3 = get_field(s->fields, i3); - int i4 = get_field(s->fields, i4); - int i5 = get_field(s->fields, i5); + int i3 = get_field(s, i3); + int i4 = get_field(s, i4); + int i5 = get_field(s, i5); int do_zero = i4 & 0x80; uint64_t mask, imask, pmask; int pos, len, rot; /* Adjust the arguments for the specific insn. */ - switch (s->fields->op2) { + switch (s->fields.op2) { case 0x55: /* risbg */ case 0x59: /* risbgn */ i3 &= 63; @@ -3803,7 +3804,7 @@ static DisasJumpType op_risbg(DisasContext *s, DisasOps *o) len = i4 - i3 + 1; pos = 63 - i4; rot = i5 & 63; - if (s->fields->op2 == 0x5d) { + if (s->fields.op2 == 0x5d) { pos += 32; } @@ -3844,9 +3845,9 @@ static DisasJumpType op_risbg(DisasContext *s, DisasOps *o) static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o) { - int i3 = get_field(s->fields, i3); - int i4 = get_field(s->fields, i4); - int i5 = get_field(s->fields, i5); + int i3 = get_field(s, i3); + int i4 = get_field(s, i4); + int i5 = get_field(s, i5); uint64_t mask; /* If this is a test-only form, arrange to discard the result. */ @@ -3872,7 +3873,7 @@ static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o) tcg_gen_rotli_i64(o->in2, o->in2, i5); /* Operate. 
*/ - switch (s->fields->op2) { + switch (s->fields.op2) { case 0x55: /* AND */ tcg_gen_ori_i64(o->in2, o->in2, ~mask); tcg_gen_and_i64(o->out, o->out, o->in2); @@ -3987,7 +3988,7 @@ static DisasJumpType op_sam(DisasContext *s, DisasOps *o) static DisasJumpType op_sar(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); + int r1 = get_field(s, r1); tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1])); return DISAS_NEXT; } @@ -4040,8 +4041,8 @@ static DisasJumpType op_servc(DisasContext *s, DisasOps *o) static DisasJumpType op_sigp(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3); set_cc_static(s); tcg_temp_free_i32(r1); @@ -4057,7 +4058,7 @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o) TCGLabel *lab; int r1; - disas_jcc(s, &c, get_field(s->fields, m3)); + disas_jcc(s, &c, get_field(s, m3)); /* We want to store when the condition is fulfilled, so branch out when it's not */ @@ -4071,8 +4072,8 @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o) } free_compare(&c); - r1 = get_field(s->fields, r1); - a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2)); + r1 = get_field(s, r1); + a = get_address(s, 0, get_field(s, b2), get_field(s, d2)); switch (s->insn->data) { case 1: /* STOCG */ tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s)); @@ -4184,11 +4185,11 @@ static DisasJumpType op_spm(DisasContext *s, DisasOps *o) static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) { - int b1 = get_field(s->fields, b1); - int d1 = get_field(s->fields, d1); - int b2 = get_field(s->fields, b2); - int d2 = get_field(s->fields, d2); - int r3 = get_field(s->fields, r3); + int b1 = get_field(s, b1); + int d1 = get_field(s, d1); + int b2 = get_field(s, b2); + int d2 = get_field(s, d2); + int r3 = get_field(s, r3); TCGv_i64 tmp = tcg_temp_new_i64(); /* fetch all operands first */ @@ -4304,8 +4305,8 @@ static DisasJumpType op_stckc(DisasContext *s, DisasOps *o) static DisasJumpType op_stctg(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); gen_helper_stctg(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -4314,8 +4315,8 @@ static DisasJumpType op_stctg(DisasContext *s, DisasOps *o) static DisasJumpType op_stctl(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); gen_helper_stctl(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -4477,7 +4478,7 @@ static DisasJumpType op_stpx(DisasContext *s, DisasOps *o) static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) { - uint64_t i2 = get_field(s->fields, i2); + uint64_t i2 = get_field(s, i2); TCGv_i64 t; /* It is important to do what the instruction name says: STORE THEN. 
@@ -4488,7 +4489,7 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s)); tcg_temp_free_i64(t); - if (s->fields->op == 0xac) { + if (s->fields.op == 0xac) { tcg_gen_andi_i64(psw_mask, psw_mask, (i2 << 56) | 0x00ffffffffffffffull); } else { @@ -4501,7 +4502,7 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) static DisasJumpType op_stura(DisasContext *s, DisasOps *o) { - o->addr1 = get_address(s, 0, get_field(s->fields, r2), 0); + o->addr1 = get_address(s, 0, get_field(s, r2), 0); tcg_gen_qemu_st_tl(o->in1, o->addr1, MMU_REAL_IDX, s->insn->data); if (s->base.tb->flags & FLAG_MASK_PER) { @@ -4545,8 +4546,8 @@ static DisasJumpType op_st64(DisasContext *s, DisasOps *o) static DisasJumpType op_stam(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); gen_helper_stam(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -4555,7 +4556,7 @@ static DisasJumpType op_stam(DisasContext *s, DisasOps *o) static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) { - int m3 = get_field(s->fields, m3); + int m3 = get_field(s, m3); int pos, base = s->insn->data; TCGv_i64 tmp = tcg_temp_new_i64(); @@ -4604,8 +4605,8 @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) static DisasJumpType op_stm(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); int size = s->insn->data; TCGv_i64 tsize = tcg_const_i64(size); @@ -4628,8 +4629,8 @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o) static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) { - int r1 = get_field(s->fields, r1); - int r3 = get_field(s->fields, r3); + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); TCGv_i64 t = tcg_temp_new_i64(); TCGv_i64 t4 = tcg_const_i64(4); TCGv_i64 t32 = tcg_const_i64(32); @@ -4665,8 +4666,8 @@ static DisasJumpType op_stpq(DisasContext *s, DisasOps *o) static DisasJumpType op_srst(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); gen_helper_srst(cpu_env, r1, r2); @@ -4678,8 +4679,8 @@ static DisasJumpType op_srst(DisasContext *s, DisasOps *o) static DisasJumpType op_srstu(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); gen_helper_srstu(cpu_env, r1, r2); @@ -4728,7 +4729,7 @@ static DisasJumpType op_svc(DisasContext *s, DisasOps *o) update_psw_addr(s); update_cc_op(s); - t = tcg_const_i32(get_field(s->fields, i1) & 0xff); + t = tcg_const_i32(get_field(s, i1) & 0xff); tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code)); tcg_temp_free_i32(t); @@ -4791,7 +4792,7 @@ static DisasJumpType op_tprot(DisasContext *s, DisasOps *o) static DisasJumpType op_tp(DisasContext *s, DisasOps *o) { - TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1); + TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1); gen_helper_tp(cc_op, cpu_env, o->addr1, l1); tcg_temp_free_i32(l1); set_cc_static(s); @@ -4800,7 +4801,7 @@ static 
DisasJumpType op_tp(DisasContext *s, DisasOps *o) static DisasJumpType op_tr(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_tr(cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); set_cc_static(s); @@ -4817,7 +4818,7 @@ static DisasJumpType op_tre(DisasContext *s, DisasOps *o) static DisasJumpType op_trt(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); set_cc_static(s); @@ -4826,7 +4827,7 @@ static DisasJumpType op_trt(DisasContext *s, DisasOps *o) static DisasJumpType op_trtr(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); set_cc_static(s); @@ -4835,11 +4836,11 @@ static DisasJumpType op_trtr(DisasContext *s, DisasOps *o) static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3); TCGv_i32 tst = tcg_temp_new_i32(); - int m3 = get_field(s->fields, m3); + int m3 = get_field(s, m3); if (!s390_has_feat(S390_FEAT_ETF2_ENH)) { m3 = 0; @@ -4876,7 +4877,7 @@ static DisasJumpType op_ts(DisasContext *s, DisasOps *o) static DisasJumpType op_unpk(DisasContext *s, DisasOps *o) { - TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1)); + TCGv_i32 l = tcg_const_i32(get_field(s, l1)); gen_helper_unpk(cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(l); return DISAS_NEXT; @@ -4884,7 +4885,7 @@ static DisasJumpType op_unpk(DisasContext *s, DisasOps *o) static DisasJumpType op_unpka(DisasContext *s, DisasOps *o) { - int l1 = get_field(s->fields, l1) + 1; + int l1 = get_field(s, l1) + 1; TCGv_i32 l; /* The length must not exceed 32 bytes. */ @@ -4901,7 +4902,7 @@ static DisasJumpType op_unpka(DisasContext *s, DisasOps *o) static DisasJumpType op_unpku(DisasContext *s, DisasOps *o) { - int l1 = get_field(s->fields, l1) + 1; + int l1 = get_field(s, l1) + 1; TCGv_i32 l; /* The length must be even and should not exceed 64 bytes. 
*/ @@ -4919,11 +4920,11 @@ static DisasJumpType op_unpku(DisasContext *s, DisasOps *o) static DisasJumpType op_xc(DisasContext *s, DisasOps *o) { - int d1 = get_field(s->fields, d1); - int d2 = get_field(s->fields, d2); - int b1 = get_field(s->fields, b1); - int b2 = get_field(s->fields, b2); - int l = get_field(s->fields, l1); + int d1 = get_field(s, d1); + int d2 = get_field(s, d2); + int b1 = get_field(s, b1); + int b2 = get_field(s, b2); + int l = get_field(s, l1); TCGv_i32 t32; o->addr1 = get_address(s, 0, b1, d1); @@ -5030,7 +5031,7 @@ static DisasJumpType op_zero2(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_clp(DisasContext *s, DisasOps *o) { - TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); + TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); gen_helper_clp(cpu_env, r2); tcg_temp_free_i32(r2); @@ -5040,8 +5041,8 @@ static DisasJumpType op_clp(DisasContext *s, DisasOps *o) static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); gen_helper_pcilg(cpu_env, r1, r2); tcg_temp_free_i32(r1); @@ -5052,8 +5053,8 @@ static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o) static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); gen_helper_pcistg(cpu_env, r1, r2); tcg_temp_free_i32(r1); @@ -5064,8 +5065,8 @@ static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o) static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 ar = tcg_const_i32(get_field(s, b2)); gen_helper_stpcifc(cpu_env, r1, o->addr1, ar); tcg_temp_free_i32(ar); @@ -5082,8 +5083,8 @@ static DisasJumpType op_sic(DisasContext *s, DisasOps *o) static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); gen_helper_rpcit(cpu_env, r1, r2); tcg_temp_free_i32(r1); @@ -5094,9 +5095,9 @@ static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o) static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); - TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); + TCGv_i32 ar = tcg_const_i32(get_field(s, b2)); gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar); tcg_temp_free_i32(ar); @@ -5108,8 +5109,8 @@ static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o) static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o) { - TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); - TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2)); + TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); + TCGv_i32 ar = tcg_const_i32(get_field(s, b2)); gen_helper_mpcifc(cpu_env, r1, o->addr1, ar); tcg_temp_free_i32(ar); @@ -5288,29 +5289,29 @@ static void cout_tm64(DisasContext *s, DisasOps 
*o) the "wout" generators, in some cases we need a new temporary, and in some cases we can write to a TCG global. */ -static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o) +static void prep_new(DisasContext *s, DisasOps *o) { o->out = tcg_temp_new_i64(); } #define SPEC_prep_new 0 -static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o) +static void prep_new_P(DisasContext *s, DisasOps *o) { o->out = tcg_temp_new_i64(); o->out2 = tcg_temp_new_i64(); } #define SPEC_prep_new_P 0 -static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o) +static void prep_r1(DisasContext *s, DisasOps *o) { - o->out = regs[get_field(f, r1)]; + o->out = regs[get_field(s, r1)]; o->g_out = true; } #define SPEC_prep_r1 0 -static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o) +static void prep_r1_P(DisasContext *s, DisasOps *o) { - int r1 = get_field(f, r1); + int r1 = get_field(s, r1); o->out = regs[r1]; o->out2 = regs[r1 + 1]; o->g_out = o->g_out2 = true; @@ -5318,10 +5319,10 @@ static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o) #define SPEC_prep_r1_P SPEC_r1_even /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */ -static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o) +static void prep_x1(DisasContext *s, DisasOps *o) { - o->out = load_freg(get_field(f, r1)); - o->out2 = load_freg(get_field(f, r1) + 2); + o->out = load_freg(get_field(s, r1)); + o->out2 = load_freg(get_field(s, r1) + 2); } #define SPEC_prep_x1 SPEC_r1_f128 @@ -5331,367 +5332,367 @@ static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o) generally handled by having a "prep" generator install the TCG global as the destination of the operation. */ -static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_r1(DisasContext *s, DisasOps *o) { - store_reg(get_field(f, r1), o->out); + store_reg(get_field(s, r1), o->out); } #define SPEC_wout_r1 0 -static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_r1_8(DisasContext *s, DisasOps *o) { - int r1 = get_field(f, r1); + int r1 = get_field(s, r1); tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8); } #define SPEC_wout_r1_8 0 -static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_r1_16(DisasContext *s, DisasOps *o) { - int r1 = get_field(f, r1); + int r1 = get_field(s, r1); tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16); } #define SPEC_wout_r1_16 0 -static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_r1_32(DisasContext *s, DisasOps *o) { - store_reg32_i64(get_field(f, r1), o->out); + store_reg32_i64(get_field(s, r1), o->out); } #define SPEC_wout_r1_32 0 -static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_r1_32h(DisasContext *s, DisasOps *o) { - store_reg32h_i64(get_field(f, r1), o->out); + store_reg32h_i64(get_field(s, r1), o->out); } #define SPEC_wout_r1_32h 0 -static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_r1_P32(DisasContext *s, DisasOps *o) { - int r1 = get_field(f, r1); + int r1 = get_field(s, r1); store_reg32_i64(r1, o->out); store_reg32_i64(r1 + 1, o->out2); } #define SPEC_wout_r1_P32 SPEC_r1_even -static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_r1_D32(DisasContext *s, DisasOps *o) { - int r1 = get_field(f, r1); + int r1 = get_field(s, r1); store_reg32_i64(r1 + 1, o->out); tcg_gen_shri_i64(o->out, o->out, 32); store_reg32_i64(r1, o->out); } 
#define SPEC_wout_r1_D32 SPEC_r1_even -static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_r3_P32(DisasContext *s, DisasOps *o) { - int r3 = get_field(f, r3); + int r3 = get_field(s, r3); store_reg32_i64(r3, o->out); store_reg32_i64(r3 + 1, o->out2); } #define SPEC_wout_r3_P32 SPEC_r3_even -static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_r3_P64(DisasContext *s, DisasOps *o) { - int r3 = get_field(f, r3); + int r3 = get_field(s, r3); store_reg(r3, o->out); store_reg(r3 + 1, o->out2); } #define SPEC_wout_r3_P64 SPEC_r3_even -static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_e1(DisasContext *s, DisasOps *o) { - store_freg32_i64(get_field(f, r1), o->out); + store_freg32_i64(get_field(s, r1), o->out); } #define SPEC_wout_e1 0 -static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_f1(DisasContext *s, DisasOps *o) { - store_freg(get_field(f, r1), o->out); + store_freg(get_field(s, r1), o->out); } #define SPEC_wout_f1 0 -static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_x1(DisasContext *s, DisasOps *o) { - int f1 = get_field(s->fields, r1); + int f1 = get_field(s, r1); store_freg(f1, o->out); store_freg(f1 + 2, o->out2); } #define SPEC_wout_x1 SPEC_r1_f128 -static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o) { - if (get_field(f, r1) != get_field(f, r2)) { - store_reg32_i64(get_field(f, r1), o->out); + if (get_field(s, r1) != get_field(s, r2)) { + store_reg32_i64(get_field(s, r1), o->out); } } #define SPEC_wout_cond_r1r2_32 0 -static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_cond_e1e2(DisasContext *s, DisasOps *o) { - if (get_field(f, r1) != get_field(f, r2)) { - store_freg32_i64(get_field(f, r1), o->out); + if (get_field(s, r1) != get_field(s, r2)) { + store_freg32_i64(get_field(s, r1), o->out); } } #define SPEC_wout_cond_e1e2 0 -static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_m1_8(DisasContext *s, DisasOps *o) { tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s)); } #define SPEC_wout_m1_8 0 -static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_m1_16(DisasContext *s, DisasOps *o) { tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s)); } #define SPEC_wout_m1_16 0 #ifndef CONFIG_USER_ONLY -static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_m1_16a(DisasContext *s, DisasOps *o) { tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN); } #define SPEC_wout_m1_16a 0 #endif -static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_m1_32(DisasContext *s, DisasOps *o) { tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s)); } #define SPEC_wout_m1_32 0 #ifndef CONFIG_USER_ONLY -static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_m1_32a(DisasContext *s, DisasOps *o) { tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN); } #define SPEC_wout_m1_32a 0 #endif -static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_m1_64(DisasContext *s, DisasOps *o) { tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s)); } #define SPEC_wout_m1_64 0 #ifndef CONFIG_USER_ONLY -static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_m1_64a(DisasContext *s, 
DisasOps *o) { tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); } #define SPEC_wout_m1_64a 0 #endif -static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_m2_32(DisasContext *s, DisasOps *o) { tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s)); } #define SPEC_wout_m2_32 0 -static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_in2_r1(DisasContext *s, DisasOps *o) { - store_reg(get_field(f, r1), o->in2); + store_reg(get_field(s, r1), o->in2); } #define SPEC_wout_in2_r1 0 -static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o) +static void wout_in2_r1_32(DisasContext *s, DisasOps *o) { - store_reg32_i64(get_field(f, r1), o->in2); + store_reg32_i64(get_field(s, r1), o->in2); } #define SPEC_wout_in2_r1_32 0 /* ====================================================================== */ /* The "INput 1" generators. These load the first operand to an insn. */ -static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r1(DisasContext *s, DisasOps *o) { - o->in1 = load_reg(get_field(f, r1)); + o->in1 = load_reg(get_field(s, r1)); } #define SPEC_in1_r1 0 -static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r1_o(DisasContext *s, DisasOps *o) { - o->in1 = regs[get_field(f, r1)]; + o->in1 = regs[get_field(s, r1)]; o->g_in1 = true; } #define SPEC_in1_r1_o 0 -static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r1_32s(DisasContext *s, DisasOps *o) { o->in1 = tcg_temp_new_i64(); - tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]); + tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]); } #define SPEC_in1_r1_32s 0 -static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r1_32u(DisasContext *s, DisasOps *o) { o->in1 = tcg_temp_new_i64(); - tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]); + tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]); } #define SPEC_in1_r1_32u 0 -static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r1_sr32(DisasContext *s, DisasOps *o) { o->in1 = tcg_temp_new_i64(); - tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32); + tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32); } #define SPEC_in1_r1_sr32 0 -static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r1p1(DisasContext *s, DisasOps *o) { - o->in1 = load_reg(get_field(f, r1) + 1); + o->in1 = load_reg(get_field(s, r1) + 1); } #define SPEC_in1_r1p1 SPEC_r1_even -static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r1p1_32s(DisasContext *s, DisasOps *o) { o->in1 = tcg_temp_new_i64(); - tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]); + tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]); } #define SPEC_in1_r1p1_32s SPEC_r1_even -static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r1p1_32u(DisasContext *s, DisasOps *o) { o->in1 = tcg_temp_new_i64(); - tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]); + tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]); } #define SPEC_in1_r1p1_32u SPEC_r1_even -static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r1_D32(DisasContext *s, DisasOps *o) { - int r1 = get_field(f, r1); + int r1 = get_field(s, r1); o->in1 = tcg_temp_new_i64(); tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]); } #define SPEC_in1_r1_D32 SPEC_r1_even -static void in1_r2(DisasContext *s, DisasFields *f, DisasOps 
*o) +static void in1_r2(DisasContext *s, DisasOps *o) { - o->in1 = load_reg(get_field(f, r2)); + o->in1 = load_reg(get_field(s, r2)); } #define SPEC_in1_r2 0 -static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r2_sr32(DisasContext *s, DisasOps *o) { o->in1 = tcg_temp_new_i64(); - tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32); + tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32); } #define SPEC_in1_r2_sr32 0 -static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r3(DisasContext *s, DisasOps *o) { - o->in1 = load_reg(get_field(f, r3)); + o->in1 = load_reg(get_field(s, r3)); } #define SPEC_in1_r3 0 -static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r3_o(DisasContext *s, DisasOps *o) { - o->in1 = regs[get_field(f, r3)]; + o->in1 = regs[get_field(s, r3)]; o->g_in1 = true; } #define SPEC_in1_r3_o 0 -static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r3_32s(DisasContext *s, DisasOps *o) { o->in1 = tcg_temp_new_i64(); - tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]); + tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]); } #define SPEC_in1_r3_32s 0 -static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r3_32u(DisasContext *s, DisasOps *o) { o->in1 = tcg_temp_new_i64(); - tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]); + tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]); } #define SPEC_in1_r3_32u 0 -static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_r3_D32(DisasContext *s, DisasOps *o) { - int r3 = get_field(f, r3); + int r3 = get_field(s, r3); o->in1 = tcg_temp_new_i64(); tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]); } #define SPEC_in1_r3_D32 SPEC_r3_even -static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_e1(DisasContext *s, DisasOps *o) { - o->in1 = load_freg32_i64(get_field(f, r1)); + o->in1 = load_freg32_i64(get_field(s, r1)); } #define SPEC_in1_e1 0 -static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_f1(DisasContext *s, DisasOps *o) { - o->in1 = load_freg(get_field(f, r1)); + o->in1 = load_freg(get_field(s, r1)); } #define SPEC_in1_f1 0 /* Load the high double word of an extended (128-bit) format FP number */ -static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_x2h(DisasContext *s, DisasOps *o) { - o->in1 = load_freg(get_field(f, r2)); + o->in1 = load_freg(get_field(s, r2)); } #define SPEC_in1_x2h SPEC_r2_f128 -static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_f3(DisasContext *s, DisasOps *o) { - o->in1 = load_freg(get_field(f, r3)); + o->in1 = load_freg(get_field(s, r3)); } #define SPEC_in1_f3 0 -static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_la1(DisasContext *s, DisasOps *o) { - o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1)); + o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); } #define SPEC_in1_la1 0 -static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_la2(DisasContext *s, DisasOps *o) { - int x2 = have_field(f, x2) ? get_field(f, x2) : 0; - o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2)); + int x2 = have_field(s, x2) ? 
get_field(s, x2) : 0; + o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); } #define SPEC_in1_la2 0 -static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_m1_8u(DisasContext *s, DisasOps *o) { - in1_la1(s, f, o); + in1_la1(s, o); o->in1 = tcg_temp_new_i64(); tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_8u 0 -static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_m1_16s(DisasContext *s, DisasOps *o) { - in1_la1(s, f, o); + in1_la1(s, o); o->in1 = tcg_temp_new_i64(); tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_16s 0 -static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_m1_16u(DisasContext *s, DisasOps *o) { - in1_la1(s, f, o); + in1_la1(s, o); o->in1 = tcg_temp_new_i64(); tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_16u 0 -static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_m1_32s(DisasContext *s, DisasOps *o) { - in1_la1(s, f, o); + in1_la1(s, o); o->in1 = tcg_temp_new_i64(); tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_32s 0 -static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_m1_32u(DisasContext *s, DisasOps *o) { - in1_la1(s, f, o); + in1_la1(s, o); o->in1 = tcg_temp_new_i64(); tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s)); } #define SPEC_in1_m1_32u 0 -static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o) +static void in1_m1_64(DisasContext *s, DisasOps *o) { - in1_la1(s, f, o); + in1_la1(s, o); o->in1 = tcg_temp_new_i64(); tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s)); } @@ -5700,306 +5701,306 @@ static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o) /* ====================================================================== */ /* The "INput 2" generators. These load the second operand to an insn. 
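The in1_m1_* loaders a few hunks up all follow one shape under the new two-argument convention: compute the first-operand address with in1_la1() and then load the required width and signedness into a fresh temporary. Purely as an illustration of that convention (in1_m1_8s is a hypothetical name, not part of this patch), an additional loader would be written as:

    /* Hypothetical example of the convention; not present in the patch. */
    static void in1_m1_8s(DisasContext *s, DisasOps *o)
    {
        in1_la1(s, o);                 /* o->addr1 = b1/d1 first-operand address */
        o->in1 = tcg_temp_new_i64();
        tcg_gen_qemu_ld8s(o->in1, o->addr1, get_mem_index(s));
    }
    #define SPEC_in1_m1_8s 0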
*/ -static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r1_o(DisasContext *s, DisasOps *o) { - o->in2 = regs[get_field(f, r1)]; + o->in2 = regs[get_field(s, r1)]; o->g_in2 = true; } #define SPEC_in2_r1_o 0 -static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r1_16u(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]); + tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]); } #define SPEC_in2_r1_16u 0 -static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r1_32u(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]); + tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]); } #define SPEC_in2_r1_32u 0 -static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r1_D32(DisasContext *s, DisasOps *o) { - int r1 = get_field(f, r1); + int r1 = get_field(s, r1); o->in2 = tcg_temp_new_i64(); tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]); } #define SPEC_in2_r1_D32 SPEC_r1_even -static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2(DisasContext *s, DisasOps *o) { - o->in2 = load_reg(get_field(f, r2)); + o->in2 = load_reg(get_field(s, r2)); } #define SPEC_in2_r2 0 -static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2_o(DisasContext *s, DisasOps *o) { - o->in2 = regs[get_field(f, r2)]; + o->in2 = regs[get_field(s, r2)]; o->g_in2 = true; } #define SPEC_in2_r2_o 0 -static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2_nz(DisasContext *s, DisasOps *o) { - int r2 = get_field(f, r2); + int r2 = get_field(s, r2); if (r2 != 0) { o->in2 = load_reg(r2); } } #define SPEC_in2_r2_nz 0 -static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2_8s(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]); + tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]); } #define SPEC_in2_r2_8s 0 -static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2_8u(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]); + tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]); } #define SPEC_in2_r2_8u 0 -static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2_16s(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]); + tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]); } #define SPEC_in2_r2_16s 0 -static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2_16u(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]); + tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]); } #define SPEC_in2_r2_16u 0 -static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r3(DisasContext *s, DisasOps *o) { - o->in2 = load_reg(get_field(f, r3)); + o->in2 = load_reg(get_field(s, r3)); } #define SPEC_in2_r3 0 -static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r3_sr32(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32); + tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32); } #define SPEC_in2_r3_sr32 0 -static void in2_r3_32u(DisasContext *s, DisasFields 
*f, DisasOps *o) +static void in2_r3_32u(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r3)]); + tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]); } #define SPEC_in2_r3_32u 0 -static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2_32s(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]); + tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]); } #define SPEC_in2_r2_32s 0 -static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2_32u(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]); + tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]); } #define SPEC_in2_r2_32u 0 -static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_r2_sr32(DisasContext *s, DisasOps *o) { o->in2 = tcg_temp_new_i64(); - tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32); + tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32); } #define SPEC_in2_r2_sr32 0 -static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_e2(DisasContext *s, DisasOps *o) { - o->in2 = load_freg32_i64(get_field(f, r2)); + o->in2 = load_freg32_i64(get_field(s, r2)); } #define SPEC_in2_e2 0 -static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_f2(DisasContext *s, DisasOps *o) { - o->in2 = load_freg(get_field(f, r2)); + o->in2 = load_freg(get_field(s, r2)); } #define SPEC_in2_f2 0 /* Load the low double word of an extended (128-bit) format FP number */ -static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_x2l(DisasContext *s, DisasOps *o) { - o->in2 = load_freg(get_field(f, r2) + 2); + o->in2 = load_freg(get_field(s, r2) + 2); } #define SPEC_in2_x2l SPEC_r2_f128 -static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_ra2(DisasContext *s, DisasOps *o) { - o->in2 = get_address(s, 0, get_field(f, r2), 0); + o->in2 = get_address(s, 0, get_field(s, r2), 0); } #define SPEC_in2_ra2 0 -static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_a2(DisasContext *s, DisasOps *o) { - int x2 = have_field(f, x2) ? get_field(f, x2) : 0; - o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2)); + int x2 = have_field(s, x2) ? 
get_field(s, x2) : 0; + o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); } #define SPEC_in2_a2 0 -static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_ri2(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2); + o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2); } #define SPEC_in2_ri2 0 -static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_sh32(DisasContext *s, DisasOps *o) { - help_l2_shift(s, f, o, 31); + help_l2_shift(s, o, 31); } #define SPEC_in2_sh32 0 -static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_sh64(DisasContext *s, DisasOps *o) { - help_l2_shift(s, f, o, 63); + help_l2_shift(s, o, 63); } #define SPEC_in2_sh64 0 -static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_m2_8u(DisasContext *s, DisasOps *o) { - in2_a2(s, f, o); + in2_a2(s, o); tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_8u 0 -static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_m2_16s(DisasContext *s, DisasOps *o) { - in2_a2(s, f, o); + in2_a2(s, o); tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_16s 0 -static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_m2_16u(DisasContext *s, DisasOps *o) { - in2_a2(s, f, o); + in2_a2(s, o); tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_16u 0 -static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_m2_32s(DisasContext *s, DisasOps *o) { - in2_a2(s, f, o); + in2_a2(s, o); tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_32s 0 -static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_m2_32u(DisasContext *s, DisasOps *o) { - in2_a2(s, f, o); + in2_a2(s, o); tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_32u 0 #ifndef CONFIG_USER_ONLY -static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_m2_32ua(DisasContext *s, DisasOps *o) { - in2_a2(s, f, o); + in2_a2(s, o); tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN); } #define SPEC_in2_m2_32ua 0 #endif -static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_m2_64(DisasContext *s, DisasOps *o) { - in2_a2(s, f, o); + in2_a2(s, o); tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_m2_64 0 #ifndef CONFIG_USER_ONLY -static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_m2_64a(DisasContext *s, DisasOps *o) { - in2_a2(s, f, o); + in2_a2(s, o); tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN); } #define SPEC_in2_m2_64a 0 #endif -static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_mri2_16u(DisasContext *s, DisasOps *o) { - in2_ri2(s, f, o); + in2_ri2(s, o); tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_mri2_16u 0 -static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_mri2_32s(DisasContext *s, DisasOps *o) { - in2_ri2(s, f, o); + in2_ri2(s, o); tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_mri2_32s 0 -static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_mri2_32u(DisasContext *s, DisasOps *o) { - in2_ri2(s, f, o); + in2_ri2(s, o); 
tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_mri2_32u 0 -static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_mri2_64(DisasContext *s, DisasOps *o) { - in2_ri2(s, f, o); + in2_ri2(s, o); tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); } #define SPEC_in2_mri2_64 0 -static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_i2(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64(get_field(f, i2)); + o->in2 = tcg_const_i64(get_field(s, i2)); } #define SPEC_in2_i2 0 -static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_i2_8u(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64((uint8_t)get_field(f, i2)); + o->in2 = tcg_const_i64((uint8_t)get_field(s, i2)); } #define SPEC_in2_i2_8u 0 -static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_i2_16u(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64((uint16_t)get_field(f, i2)); + o->in2 = tcg_const_i64((uint16_t)get_field(s, i2)); } #define SPEC_in2_i2_16u 0 -static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_i2_32u(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64((uint32_t)get_field(f, i2)); + o->in2 = tcg_const_i64((uint32_t)get_field(s, i2)); } #define SPEC_in2_i2_32u 0 -static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_i2_16u_shl(DisasContext *s, DisasOps *o) { - uint64_t i2 = (uint16_t)get_field(f, i2); + uint64_t i2 = (uint16_t)get_field(s, i2); o->in2 = tcg_const_i64(i2 << s->insn->data); } #define SPEC_in2_i2_16u_shl 0 -static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_i2_32u_shl(DisasContext *s, DisasOps *o) { - uint64_t i2 = (uint32_t)get_field(f, i2); + uint64_t i2 = (uint32_t)get_field(s, i2); o->in2 = tcg_const_i64(i2 << s->insn->data); } #define SPEC_in2_i2_32u_shl 0 #ifndef CONFIG_USER_ONLY -static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o) +static void in2_insn(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64(s->fields->raw_insn); + o->in2 = tcg_const_i64(s->fields.raw_insn); } #define SPEC_in2_insn 0 #endif @@ -6182,8 +6183,7 @@ static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn) /* Lookup the insn at the current PC, extracting the operands into O and returning the info struct for the insn. Returns NULL for invalid insn. */ -static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s, - DisasFields *f) +static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s) { uint64_t insn, pc = s->base.pc_next; int op, op2, ilen; @@ -6263,13 +6263,14 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s, break; } - memset(f, 0, sizeof(*f)); - f->raw_insn = insn; - f->op = op; - f->op2 = op2; + memset(&s->fields, 0, sizeof(s->fields)); + s->fields.raw_insn = insn; + s->fields.op = op; + s->fields.op2 = op2; /* Lookup the instruction. */ info = lookup_opc(op << 8 | op2); + s->insn = info; /* If we found it, extract the operands. 
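The extract_insn() hunk above shows where the moved state now lives: the function zeroes and fills s->fields in place and records the looked-up DisasInsn in s->insn, instead of filling a DisasFields that translate_one() used to keep on its own stack. The structure definition is not quoted in this diff, so the following is only a sketch of the two members these hunks rely on (the ordering and the omitted members are assumptions):

    /* Sketch of the DisasContext members implied by the hunks above; cc_op,
     * ilen and the rest of the structure are unchanged and omitted here. */
    struct DisasContext {
        DisasContextBase base;    /* s->base.pc_next, s->base.tb, ... */
        const DisasInsn *insn;    /* set by extract_insn() after lookup_opc() */
        DisasFields fields;       /* zeroed and refilled per instruction */
        /* ... cc_op, ilen and the other existing members ... */
    };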
*/ if (info != NULL) { @@ -6277,7 +6278,7 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s, int i; for (i = 0; i < NUM_C_FIELD; ++i) { - extract_field(f, &format_info[fmt].op[i], insn); + extract_field(&s->fields, &format_info[fmt].op[i], insn); } } return info; @@ -6298,11 +6299,10 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) { const DisasInsn *insn; DisasJumpType ret = DISAS_NEXT; - DisasFields f; DisasOps o = {}; /* Search for the insn in the table. */ - insn = extract_insn(env, s, &f); + insn = extract_insn(env, s); /* Emit insn_start now that we know the ILEN. */ tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen); @@ -6310,7 +6310,7 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) /* Not found means unimplemented/illegal opcode. */ if (insn == NULL) { qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n", - f.op, f.op2); + s->fields.op, s->fields.op2); gen_illegal_opcode(s); return DISAS_NORETURN; } @@ -6335,13 +6335,13 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) if (!(s->base.tb->flags & FLAG_MASK_AFP)) { uint8_t dxc = 0; - if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) { + if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) { dxc = 1; } - if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) { + if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) { dxc = 1; } - if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) { + if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) { dxc = 1; } if (insn->flags & IF_BFP) { @@ -6370,36 +6370,32 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) /* Check for insn specification exceptions. */ if (insn->spec) { - if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) || - (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) || - (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) || - (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) || - (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) { + if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) || + (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) || + (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) || + (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) || + (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } } - /* Set up the strutures we use to communicate with the helpers. */ - s->insn = insn; - s->fields = &f; - /* Implement the instruction. 
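With s->fields and s->insn already populated by extract_insn(), the explicit "set up the structures" step deleted above becomes redundant, and the per-phase callbacks that translate_one() invokes next shrink to two arguments. The DisasInsn definition is not part of the quoted hunks, so this is only a sketch of the callback shape implied by the calls that follow (member names taken from those calls, everything else assumed):

    /* Sketch of the callback members used by the dispatch below; the real
     * struct DisasInsn also carries the opcode, format, flags, spec and
     * data fields referenced elsewhere in this diff. */
    struct DisasInsn {
        /* ... */
        void (*help_in1)(DisasContext *s, DisasOps *o);
        void (*help_in2)(DisasContext *s, DisasOps *o);
        void (*help_prep)(DisasContext *s, DisasOps *o);
        void (*help_wout)(DisasContext *s, DisasOps *o);
        void (*help_cout)(DisasContext *s, DisasOps *o);
        DisasJumpType (*help_op)(DisasContext *s, DisasOps *o);
        /* ... */
    };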
*/ if (insn->help_in1) { - insn->help_in1(s, &f, &o); + insn->help_in1(s, &o); } if (insn->help_in2) { - insn->help_in2(s, &f, &o); + insn->help_in2(s, &o); } if (insn->help_prep) { - insn->help_prep(s, &f, &o); + insn->help_prep(s, &o); } if (insn->help_op) { ret = insn->help_op(s, &o); } if (ret != DISAS_NORETURN) { if (insn->help_wout) { - insn->help_wout(s, &f, &o); + insn->help_wout(s, &o); } if (insn->help_cout) { insn->help_cout(s, &o); diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c index 71059f9ca0..24558cce80 100644 --- a/target/s390x/translate_vx.inc.c +++ b/target/s390x/translate_vx.inc.c @@ -355,7 +355,7 @@ static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, static DisasJumpType op_vge(DisasContext *s, DisasOps *o) { const uint8_t es = s->insn->data; - const uint8_t enr = get_field(s->fields, m3); + const uint8_t enr = get_field(s, m3); TCGv_i64 tmp; if (!valid_vec_element(enr, es)) { @@ -364,12 +364,12 @@ static DisasJumpType op_vge(DisasContext *s, DisasOps *o) } tmp = tcg_temp_new_i64(); - read_vec_element_i64(tmp, get_field(s->fields, v2), enr, es); + read_vec_element_i64(tmp, get_field(s, v2), enr, es); tcg_gen_add_i64(o->addr1, o->addr1, tmp); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); - write_vec_element_i64(tmp, get_field(s->fields, v1), enr, es); + write_vec_element_i64(tmp, get_field(s, v1), enr, es); tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -389,22 +389,22 @@ static uint64_t generate_byte_mask(uint8_t mask) static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o) { - const uint16_t i2 = get_field(s->fields, i2); + const uint16_t i2 = get_field(s, i2); if (i2 == (i2 & 0xff) * 0x0101) { /* * Masks for both 64 bit elements of the vector are the same. * Trust tcg to produce a good constant loading. 
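Switching to translate_vx.inc.c, the same field-accessor rename runs through all of the vector helpers. op_vgbm (vector generate byte mask) around this point also shows a small optimisation: when both bytes of the i2 immediate are equal, which is what the (i2 & 0xff) * 0x0101 test checks, the two 64-bit halves of the mask are identical and a single 64-bit dup suffices; otherwise each half is materialised separately from generate_byte_mask(). The body of generate_byte_mask() is not quoted here, but what it computes can be modelled in plain C as expanding each mask bit into a 0x00 or 0xff byte (the bit-to-byte ordering below is an assumption):

    /* Stand-alone model of the byte-mask expansion used by op_vgbm; for
     * illustration only, the in-tree generate_byte_mask() is authoritative. */
    #include <stdint.h>

    static uint64_t byte_mask_model(uint8_t mask)
    {
        uint64_t r = 0;

        for (int i = 0; i < 8; i++) {
            if ((mask >> i) & 1) {
                r |= 0xffull << (i * 8);
            }
        }
        return r;                 /* e.g. 0x05 -> 0x0000000000ff00ff */
    }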
*/ - gen_gvec_dup64i(get_field(s->fields, v1), + gen_gvec_dup64i(get_field(s, v1), generate_byte_mask(i2 & 0xff)); } else { TCGv_i64 t = tcg_temp_new_i64(); tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8)); - write_vec_element_i64(t, get_field(s->fields, v1), 0, ES_64); + write_vec_element_i64(t, get_field(s, v1), 0, ES_64); tcg_gen_movi_i64(t, generate_byte_mask(i2)); - write_vec_element_i64(t, get_field(s->fields, v1), 1, ES_64); + write_vec_element_i64(t, get_field(s, v1), 1, ES_64); tcg_temp_free_i64(t); } return DISAS_NEXT; @@ -412,10 +412,10 @@ static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o) static DisasJumpType op_vgm(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); const uint8_t bits = NUM_VEC_ELEMENT_BITS(es); - const uint8_t i2 = get_field(s->fields, i2) & (bits - 1); - const uint8_t i3 = get_field(s->fields, i3) & (bits - 1); + const uint8_t i2 = get_field(s, i2) & (bits - 1); + const uint8_t i3 = get_field(s, i3) & (bits - 1); uint64_t mask = 0; int i; @@ -432,7 +432,7 @@ static DisasJumpType op_vgm(DisasContext *s, DisasOps *o) } } - gen_gvec_dupi(es, get_field(s->fields, v1), mask); + gen_gvec_dupi(es, get_field(s, v1), mask); return DISAS_NEXT; } @@ -444,8 +444,8 @@ static DisasJumpType op_vl(DisasContext *s, DisasOps *o) tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); - write_vec_element_i64(t0, get_field(s->fields, v1), 0, ES_64); - write_vec_element_i64(t1, get_field(s->fields, v1), 1, ES_64); + write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); + write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); tcg_temp_free(t0); tcg_temp_free(t1); return DISAS_NEXT; @@ -453,13 +453,13 @@ static DisasJumpType op_vl(DisasContext *s, DisasOps *o) static DisasJumpType op_vlr(DisasContext *s, DisasOps *o) { - gen_gvec_mov(get_field(s->fields, v1), get_field(s->fields, v2)); + gen_gvec_mov(get_field(s, v1), get_field(s, v2)); return DISAS_NEXT; } static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m3); + const uint8_t es = get_field(s, m3); TCGv_i64 tmp; if (es > ES_64) { @@ -469,7 +469,7 @@ static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) tmp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); - gen_gvec_dup_i64(es, get_field(s->fields, v1), tmp); + gen_gvec_dup_i64(es, get_field(s, v1), tmp); tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -477,7 +477,7 @@ static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) static DisasJumpType op_vle(DisasContext *s, DisasOps *o) { const uint8_t es = s->insn->data; - const uint8_t enr = get_field(s->fields, m3); + const uint8_t enr = get_field(s, m3); TCGv_i64 tmp; if (!valid_vec_element(enr, es)) { @@ -487,7 +487,7 @@ static DisasJumpType op_vle(DisasContext *s, DisasOps *o) tmp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); - write_vec_element_i64(tmp, get_field(s->fields, v1), enr, es); + write_vec_element_i64(tmp, get_field(s, v1), enr, es); tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -495,7 +495,7 @@ static DisasJumpType op_vle(DisasContext *s, DisasOps *o) static DisasJumpType op_vlei(DisasContext *s, DisasOps *o) { const uint8_t es = s->insn->data; - const uint8_t enr = get_field(s->fields, m3); + const uint8_t enr = get_field(s, m3); TCGv_i64 tmp; if (!valid_vec_element(enr, es)) { @@ 
-503,15 +503,15 @@ static DisasJumpType op_vlei(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - tmp = tcg_const_i64((int16_t)get_field(s->fields, i2)); - write_vec_element_i64(tmp, get_field(s->fields, v1), enr, es); + tmp = tcg_const_i64((int16_t)get_field(s, i2)); + write_vec_element_i64(tmp, get_field(s, v1), enr, es); tcg_temp_free_i64(tmp); return DISAS_NEXT; } static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); TCGv_ptr ptr; if (es > ES_64) { @@ -520,15 +520,15 @@ static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) } /* fast path if we don't need the register content */ - if (!get_field(s->fields, b2)) { - uint8_t enr = get_field(s->fields, d2) & (NUM_VEC_ELEMENTS(es) - 1); + if (!get_field(s, b2)) { + uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1); - read_vec_element_i64(o->out, get_field(s->fields, v3), enr, es); + read_vec_element_i64(o->out, get_field(s, v3), enr, es); return DISAS_NEXT; } ptr = tcg_temp_new_ptr(); - get_vec_element_ptr_i64(ptr, get_field(s->fields, v3), o->addr1, es); + get_vec_element_ptr_i64(ptr, get_field(s, v3), o->addr1, es); switch (es) { case ES_8: tcg_gen_ld8u_i64(o->out, ptr, 0); @@ -552,7 +552,7 @@ static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) static DisasJumpType op_vllez(DisasContext *s, DisasOps *o) { - uint8_t es = get_field(s->fields, m3); + uint8_t es = get_field(s, m3); uint8_t enr; TCGv_i64 t; @@ -585,16 +585,16 @@ static DisasJumpType op_vllez(DisasContext *s, DisasOps *o) t = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es); - zero_vec(get_field(s->fields, v1)); - write_vec_element_i64(t, get_field(s->fields, v1), enr, es); + zero_vec(get_field(s, v1)); + write_vec_element_i64(t, get_field(s, v1), enr, es); tcg_temp_free_i64(t); return DISAS_NEXT; } static DisasJumpType op_vlm(DisasContext *s, DisasOps *o) { - const uint8_t v3 = get_field(s->fields, v3); - uint8_t v1 = get_field(s->fields, v1); + const uint8_t v3 = get_field(s, v3); + uint8_t v1 = get_field(s, v1); TCGv_i64 t0, t1; if (v3 < v1 || (v3 - v1 + 1) > 16) { @@ -633,12 +633,12 @@ static DisasJumpType op_vlm(DisasContext *s, DisasOps *o) static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o) { - const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6)); - const int v1_offs = vec_full_reg_offset(get_field(s->fields, v1)); + const int64_t block_size = (1ull << (get_field(s, m3) + 6)); + const int v1_offs = vec_full_reg_offset(get_field(s, v1)); TCGv_ptr a0; TCGv_i64 bytes; - if (get_field(s->fields, m3) > 6) { + if (get_field(s, m3) > 6) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } @@ -658,7 +658,7 @@ static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o) static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); TCGv_ptr ptr; if (es > ES_64) { @@ -667,15 +667,15 @@ static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o) } /* fast path if we don't need the register content */ - if (!get_field(s->fields, b2)) { - uint8_t enr = get_field(s->fields, d2) & (NUM_VEC_ELEMENTS(es) - 1); + if (!get_field(s, b2)) { + uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1); - write_vec_element_i64(o->in2, get_field(s->fields, v1), enr, es); + write_vec_element_i64(o->in2, get_field(s, v1), enr, es); return DISAS_NEXT; } ptr = tcg_temp_new_ptr(); - get_vec_element_ptr_i64(ptr, 
get_field(s->fields, v1), o->addr1, es); + get_vec_element_ptr_i64(ptr, get_field(s, v1), o->addr1, es); switch (es) { case ES_8: tcg_gen_st8_i64(o->in2, ptr, 0); @@ -699,14 +699,14 @@ static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o) static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o) { - write_vec_element_i64(o->in1, get_field(s->fields, v1), 0, ES_64); - write_vec_element_i64(o->in2, get_field(s->fields, v1), 1, ES_64); + write_vec_element_i64(o->in1, get_field(s, v1), 0, ES_64); + write_vec_element_i64(o->in2, get_field(s, v1), 1, ES_64); return DISAS_NEXT; } static DisasJumpType op_vll(DisasContext *s, DisasOps *o) { - const int v1_offs = vec_full_reg_offset(get_field(s->fields, v1)); + const int v1_offs = vec_full_reg_offset(get_field(s, v1)); TCGv_ptr a0 = tcg_temp_new_ptr(); /* convert highest index into an actual length */ @@ -719,10 +719,10 @@ static DisasJumpType op_vll(DisasContext *s, DisasOps *o) static DisasJumpType op_vmr(DisasContext *s, DisasOps *o) { - const uint8_t v1 = get_field(s->fields, v1); - const uint8_t v2 = get_field(s->fields, v2); - const uint8_t v3 = get_field(s->fields, v3); - const uint8_t es = get_field(s->fields, m4); + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + const uint8_t v3 = get_field(s, v3); + const uint8_t es = get_field(s, m4); int dst_idx, src_idx; TCGv_i64 tmp; @@ -732,7 +732,7 @@ static DisasJumpType op_vmr(DisasContext *s, DisasOps *o) } tmp = tcg_temp_new_i64(); - if (s->fields->op2 == 0x61) { + if (s->fields.op2 == 0x61) { /* iterate backwards to avoid overwriting data we might need later */ for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) { src_idx = dst_idx / 2; @@ -761,10 +761,10 @@ static DisasJumpType op_vmr(DisasContext *s, DisasOps *o) static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) { - const uint8_t v1 = get_field(s->fields, v1); - const uint8_t v2 = get_field(s->fields, v2); - const uint8_t v3 = get_field(s->fields, v3); - const uint8_t es = get_field(s->fields, m4); + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + const uint8_t v3 = get_field(s, v3); + const uint8_t es = get_field(s, m4); static gen_helper_gvec_3 * const vpk[3] = { gen_helper_gvec_vpk16, gen_helper_gvec_vpk32, @@ -796,9 +796,9 @@ static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - switch (s->fields->op2) { + switch (s->fields.op2) { case 0x97: - if (get_field(s->fields, m5) & 0x1) { + if (get_field(s, m5) & 0x1) { gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]); set_cc_static(s); } else { @@ -806,7 +806,7 @@ static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) } break; case 0x95: - if (get_field(s->fields, m5) & 0x1) { + if (get_field(s, m5) & 0x1) { gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]); set_cc_static(s); } else { @@ -816,7 +816,7 @@ static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) case 0x94: /* If sources and destination dont't overlap -> fast path */ if (v1 != v2 && v1 != v3) { - const uint8_t src_es = get_field(s->fields, m4); + const uint8_t src_es = get_field(s, m4); const uint8_t dst_es = src_es - 1; TCGv_i64 tmp = tcg_temp_new_i64(); int dst_idx, src_idx; @@ -844,23 +844,23 @@ static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) static DisasJumpType op_vperm(DisasContext *s, DisasOps *o) { - gen_gvec_4_ool(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), get_field(s->fields, v4), + gen_gvec_4_ool(get_field(s, v1), get_field(s, v2), + 
get_field(s, v3), get_field(s, v4), 0, gen_helper_gvec_vperm); return DISAS_NEXT; } static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o) { - const uint8_t i2 = extract32(get_field(s->fields, m4), 2, 1); - const uint8_t i3 = extract32(get_field(s->fields, m4), 0, 1); + const uint8_t i2 = extract32(get_field(s, m4), 2, 1); + const uint8_t i3 = extract32(get_field(s, m4), 0, 1); TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); - read_vec_element_i64(t0, get_field(s->fields, v2), i2, ES_64); - read_vec_element_i64(t1, get_field(s->fields, v3), i3, ES_64); - write_vec_element_i64(t0, get_field(s->fields, v1), 0, ES_64); - write_vec_element_i64(t1, get_field(s->fields, v1), 1, ES_64); + read_vec_element_i64(t0, get_field(s, v2), i2, ES_64); + read_vec_element_i64(t1, get_field(s, v3), i3, ES_64); + write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); + write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); return DISAS_NEXT; @@ -868,38 +868,38 @@ static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o) static DisasJumpType op_vrep(DisasContext *s, DisasOps *o) { - const uint8_t enr = get_field(s->fields, i2); - const uint8_t es = get_field(s->fields, m4); + const uint8_t enr = get_field(s, i2); + const uint8_t es = get_field(s, m4); if (es > ES_64 || !valid_vec_element(enr, es)) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s->fields, v1)), - vec_reg_offset(get_field(s->fields, v3), enr, es), + tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s, v1)), + vec_reg_offset(get_field(s, v3), enr, es), 16, 16); return DISAS_NEXT; } static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o) { - const int64_t data = (int16_t)get_field(s->fields, i2); - const uint8_t es = get_field(s->fields, m3); + const int64_t data = (int16_t)get_field(s, i2); + const uint8_t es = get_field(s, m3); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_dupi(es, get_field(s->fields, v1), data); + gen_gvec_dupi(es, get_field(s, v1), data); return DISAS_NEXT; } static DisasJumpType op_vsce(DisasContext *s, DisasOps *o) { const uint8_t es = s->insn->data; - const uint8_t enr = get_field(s->fields, m3); + const uint8_t enr = get_field(s, m3); TCGv_i64 tmp; if (!valid_vec_element(enr, es)) { @@ -908,11 +908,11 @@ static DisasJumpType op_vsce(DisasContext *s, DisasOps *o) } tmp = tcg_temp_new_i64(); - read_vec_element_i64(tmp, get_field(s->fields, v2), enr, es); + read_vec_element_i64(tmp, get_field(s, v2), enr, es); tcg_gen_add_i64(o->addr1, o->addr1, tmp); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); - read_vec_element_i64(tmp, get_field(s->fields, v1), enr, es); + read_vec_element_i64(tmp, get_field(s, v1), enr, es); tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); tcg_temp_free_i64(tmp); return DISAS_NEXT; @@ -920,15 +920,15 @@ static DisasJumpType op_vsce(DisasContext *s, DisasOps *o) static DisasJumpType op_vsel(DisasContext *s, DisasOps *o) { - gen_gvec_fn_4(bitsel, ES_8, get_field(s->fields, v1), - get_field(s->fields, v4), get_field(s->fields, v2), - get_field(s->fields, v3)); + gen_gvec_fn_4(bitsel, ES_8, get_field(s, v1), + get_field(s, v4), get_field(s, v2), + get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vseg(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m3); + const uint8_t es = get_field(s, m3); int idx1, idx2; TCGv_i64 tmp; @@ -951,10 
+951,10 @@ static DisasJumpType op_vseg(DisasContext *s, DisasOps *o) } tmp = tcg_temp_new_i64(); - read_vec_element_i64(tmp, get_field(s->fields, v2), idx1, es | MO_SIGN); - write_vec_element_i64(tmp, get_field(s->fields, v1), 0, ES_64); - read_vec_element_i64(tmp, get_field(s->fields, v2), idx2, es | MO_SIGN); - write_vec_element_i64(tmp, get_field(s->fields, v1), 1, ES_64); + read_vec_element_i64(tmp, get_field(s, v2), idx1, es | MO_SIGN); + write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); + read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN); + write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); tcg_temp_free_i64(tmp); return DISAS_NEXT; } @@ -966,10 +966,10 @@ static DisasJumpType op_vst(DisasContext *s, DisasOps *o) /* Probe write access before actually modifying memory */ gen_helper_probe_write_access(cpu_env, o->addr1, tmp); - read_vec_element_i64(tmp, get_field(s->fields, v1), 0, ES_64); + read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); - read_vec_element_i64(tmp, get_field(s->fields, v1), 1, ES_64); + read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); tcg_temp_free_i64(tmp); return DISAS_NEXT; @@ -978,7 +978,7 @@ static DisasJumpType op_vst(DisasContext *s, DisasOps *o) static DisasJumpType op_vste(DisasContext *s, DisasOps *o) { const uint8_t es = s->insn->data; - const uint8_t enr = get_field(s->fields, m3); + const uint8_t enr = get_field(s, m3); TCGv_i64 tmp; if (!valid_vec_element(enr, es)) { @@ -987,7 +987,7 @@ static DisasJumpType op_vste(DisasContext *s, DisasOps *o) } tmp = tcg_temp_new_i64(); - read_vec_element_i64(tmp, get_field(s->fields, v1), enr, es); + read_vec_element_i64(tmp, get_field(s, v1), enr, es); tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); tcg_temp_free_i64(tmp); return DISAS_NEXT; @@ -995,8 +995,8 @@ static DisasJumpType op_vste(DisasContext *s, DisasOps *o) static DisasJumpType op_vstm(DisasContext *s, DisasOps *o) { - const uint8_t v3 = get_field(s->fields, v3); - uint8_t v1 = get_field(s->fields, v1); + const uint8_t v3 = get_field(s, v3); + uint8_t v1 = get_field(s, v1); TCGv_i64 tmp; while (v3 < v1 || (v3 - v1 + 1) > 16) { @@ -1025,7 +1025,7 @@ static DisasJumpType op_vstm(DisasContext *s, DisasOps *o) static DisasJumpType op_vstl(DisasContext *s, DisasOps *o) { - const int v1_offs = vec_full_reg_offset(get_field(s->fields, v1)); + const int v1_offs = vec_full_reg_offset(get_field(s, v1)); TCGv_ptr a0 = tcg_temp_new_ptr(); /* convert highest index into an actual length */ @@ -1038,10 +1038,10 @@ static DisasJumpType op_vstl(DisasContext *s, DisasOps *o) static DisasJumpType op_vup(DisasContext *s, DisasOps *o) { - const bool logical = s->fields->op2 == 0xd4 || s->fields->op2 == 0xd5; - const uint8_t v1 = get_field(s->fields, v1); - const uint8_t v2 = get_field(s->fields, v2); - const uint8_t src_es = get_field(s->fields, m3); + const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5; + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + const uint8_t src_es = get_field(s, m3); const uint8_t dst_es = src_es + 1; int dst_idx, src_idx; TCGv_i64 tmp; @@ -1052,7 +1052,7 @@ static DisasJumpType op_vup(DisasContext *s, DisasOps *o) } tmp = tcg_temp_new_i64(); - if (s->fields->op2 == 0xd7 || s->fields->op2 == 0xd5) { + if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) { /* iterate backwards to 
avoid overwriting data we might need later */ for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) { src_idx = dst_idx; @@ -1076,18 +1076,18 @@ static DisasJumpType op_vup(DisasContext *s, DisasOps *o) static DisasJumpType op_va(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); if (es > ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } else if (es == ES_128) { - gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3)); + gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } - gen_gvec_fn_3(add, es, get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3)); + gen_gvec_fn_3(add, es, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); return DISAS_NEXT; } @@ -1165,7 +1165,7 @@ static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, static DisasJumpType op_vacc(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fni8 = gen_acc8_i64, }, { .fni8 = gen_acc16_i64, }, @@ -1177,12 +1177,12 @@ static DisasJumpType op_vacc(DisasContext *s, DisasOps *o) gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } else if (es == ES_128) { - gen_gvec128_3_i64(gen_acc2_i64, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3)); + gen_gvec128_3_i64(gen_acc2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } - gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), &g[es]); + gen_gvec_3(get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); return DISAS_NEXT; } @@ -1203,14 +1203,14 @@ static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, static DisasJumpType op_vac(DisasContext *s, DisasOps *o) { - if (get_field(s->fields, m5) != ES_128) { + if (get_field(s, m5) != ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec128_4_i64(gen_ac2_i64, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3), - get_field(s->fields, v4)); + gen_gvec128_4_i64(gen_ac2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3), + get_field(s, v4)); return DISAS_NEXT; } @@ -1235,28 +1235,28 @@ static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o) { - if (get_field(s->fields, m5) != ES_128) { + if (get_field(s, m5) != ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec128_4_i64(gen_accc2_i64, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3), - get_field(s->fields, v4)); + gen_gvec128_4_i64(gen_accc2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3), + get_field(s, v4)); return DISAS_NEXT; } static DisasJumpType op_vn(DisasContext *s, DisasOps *o) { - gen_gvec_fn_3(and, ES_8, get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3)); + gen_gvec_fn_3(and, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vnc(DisasContext *s, DisasOps *o) { - gen_gvec_fn_3(andc, ES_8, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3)); + gen_gvec_fn_3(andc, ES_8, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); return 
DISAS_NEXT; } @@ -1296,7 +1296,7 @@ static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) static DisasJumpType op_vavg(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_vavg8, }, { .fno = gen_helper_gvec_vavg16, }, @@ -1308,8 +1308,8 @@ static DisasJumpType op_vavg(DisasContext *s, DisasOps *o) gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), &g[es]); + gen_gvec_3(get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); return DISAS_NEXT; } @@ -1344,7 +1344,7 @@ static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_vavgl8, }, { .fno = gen_helper_gvec_vavgl16, }, @@ -1356,8 +1356,8 @@ static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o) gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), &g[es]); + gen_gvec_3(get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); return DISAS_NEXT; } @@ -1367,13 +1367,13 @@ static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o) TCGv_i32 sum = tcg_temp_new_i32(); int i; - read_vec_element_i32(sum, get_field(s->fields, v3), 1, ES_32); + read_vec_element_i32(sum, get_field(s, v3), 1, ES_32); for (i = 0; i < 4; i++) { - read_vec_element_i32(tmp, get_field(s->fields, v2), i, ES_32); + read_vec_element_i32(tmp, get_field(s, v2), i, ES_32); tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp); } - zero_vec(get_field(s->fields, v1)); - write_vec_element_i32(sum, get_field(s->fields, v1), 1, ES_32); + zero_vec(get_field(s, v1)); + write_vec_element_i32(sum, get_field(s, v1), 1, ES_32); tcg_temp_free_i32(tmp); tcg_temp_free_i32(sum); @@ -1382,27 +1382,27 @@ static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o) static DisasJumpType op_vec(DisasContext *s, DisasOps *o) { - uint8_t es = get_field(s->fields, m3); + uint8_t es = get_field(s, m3); const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1; if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - if (s->fields->op2 == 0xdb) { + if (s->fields.op2 == 0xdb) { es |= MO_SIGN; } o->in1 = tcg_temp_new_i64(); o->in2 = tcg_temp_new_i64(); - read_vec_element_i64(o->in1, get_field(s->fields, v1), enr, es); - read_vec_element_i64(o->in2, get_field(s->fields, v2), enr, es); + read_vec_element_i64(o->in1, get_field(s, v1), enr, es); + read_vec_element_i64(o->in2, get_field(s, v2), enr, es); return DISAS_NEXT; } static DisasJumpType op_vc(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); TCGCond cond = s->insn->data; if (es > ES_64) { @@ -1411,15 +1411,15 @@ static DisasJumpType op_vc(DisasContext *s, DisasOps *o) } tcg_gen_gvec_cmp(cond, es, - vec_full_reg_offset(get_field(s->fields, v1)), - vec_full_reg_offset(get_field(s->fields, v2)), - vec_full_reg_offset(get_field(s->fields, v3)), 16, 16); - if (get_field(s->fields, m5) & 0x1) { + vec_full_reg_offset(get_field(s, v1)), + vec_full_reg_offset(get_field(s, v2)), + vec_full_reg_offset(get_field(s, v3)), 16, 16); + if (get_field(s, m5) & 0x1) { TCGv_i64 low = tcg_temp_new_i64(); TCGv_i64 
high = tcg_temp_new_i64(); - read_vec_element_i64(high, get_field(s->fields, v1), 0, ES_64); - read_vec_element_i64(low, get_field(s->fields, v1), 1, ES_64); + read_vec_element_i64(high, get_field(s, v1), 0, ES_64); + read_vec_element_i64(low, get_field(s, v1), 1, ES_64); gen_op_update2_cc_i64(s, CC_OP_VC, low, high); tcg_temp_free_i64(low); @@ -1440,7 +1440,7 @@ static void gen_clz_i64(TCGv_i64 d, TCGv_i64 a) static DisasJumpType op_vclz(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m3); + const uint8_t es = get_field(s, m3); static const GVecGen2 g[4] = { { .fno = gen_helper_gvec_vclz8, }, { .fno = gen_helper_gvec_vclz16, }, @@ -1452,7 +1452,7 @@ static DisasJumpType op_vclz(DisasContext *s, DisasOps *o) gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_2(get_field(s->fields, v1), get_field(s->fields, v2), &g[es]); + gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]); return DISAS_NEXT; } @@ -1468,7 +1468,7 @@ static void gen_ctz_i64(TCGv_i64 d, TCGv_i64 a) static DisasJumpType op_vctz(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m3); + const uint8_t es = get_field(s, m3); static const GVecGen2 g[4] = { { .fno = gen_helper_gvec_vctz8, }, { .fno = gen_helper_gvec_vctz16, }, @@ -1480,20 +1480,20 @@ static DisasJumpType op_vctz(DisasContext *s, DisasOps *o) gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_2(get_field(s->fields, v1), get_field(s->fields, v2), &g[es]); + gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]); return DISAS_NEXT; } static DisasJumpType op_vx(DisasContext *s, DisasOps *o) { - gen_gvec_fn_3(xor, ES_8, get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3)); + gen_gvec_fn_3(xor, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_vgfm8, }, { .fno = gen_helper_gvec_vgfm16, }, @@ -1505,14 +1505,14 @@ static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o) gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), &g[es]); + gen_gvec_3(get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); return DISAS_NEXT; } static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m5); + const uint8_t es = get_field(s, m5); static const GVecGen4 g[4] = { { .fno = gen_helper_gvec_vgfma8, }, { .fno = gen_helper_gvec_vgfma16, }, @@ -1524,50 +1524,50 @@ static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o) gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_4(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), get_field(s->fields, v4), &g[es]); + gen_gvec_4(get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), &g[es]); return DISAS_NEXT; } static DisasJumpType op_vlc(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m3); + const uint8_t es = get_field(s, m3); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_fn_2(neg, es, get_field(s->fields, v1), get_field(s->fields, v2)); + gen_gvec_fn_2(neg, es, get_field(s, v1), get_field(s, v2)); return DISAS_NEXT; } static DisasJumpType op_vlp(DisasContext 
*s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m3); + const uint8_t es = get_field(s, m3); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_fn_2(abs, es, get_field(s->fields, v1), get_field(s->fields, v2)); + gen_gvec_fn_2(abs, es, get_field(s, v1), get_field(s, v2)); return DISAS_NEXT; } static DisasJumpType op_vmx(DisasContext *s, DisasOps *o) { - const uint8_t v1 = get_field(s->fields, v1); - const uint8_t v2 = get_field(s->fields, v2); - const uint8_t v3 = get_field(s->fields, v3); - const uint8_t es = get_field(s->fields, m4); + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + const uint8_t v3 = get_field(s, v3); + const uint8_t es = get_field(s, m4); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - switch (s->fields->op2) { + switch (s->fields.op2) { case 0xff: gen_gvec_fn_3(smax, es, v1, v2, v3); break; @@ -1634,7 +1634,7 @@ static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) static DisasJumpType op_vma(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m5); + const uint8_t es = get_field(s, m5); static const GVecGen4 g_vmal[3] = { { .fno = gen_helper_gvec_vmal8, }, { .fno = gen_helper_gvec_vmal16, }, @@ -1677,7 +1677,7 @@ static DisasJumpType op_vma(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - switch (s->fields->op2) { + switch (s->fields.op2) { case 0xaa: fn = &g_vmal[es]; break; @@ -1703,8 +1703,8 @@ static DisasJumpType op_vma(DisasContext *s, DisasOps *o) g_assert_not_reached(); } - gen_gvec_4(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), get_field(s->fields, v4), fn); + gen_gvec_4(get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), fn); return DISAS_NEXT; } @@ -1726,7 +1726,7 @@ static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) static DisasJumpType op_vm(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); static const GVecGen3 g_vmh[3] = { { .fno = gen_helper_gvec_vmh8, }, { .fno = gen_helper_gvec_vmh16, }, @@ -1764,10 +1764,10 @@ static DisasJumpType op_vm(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - switch (s->fields->op2) { + switch (s->fields.op2) { case 0xa2: - gen_gvec_fn_3(mul, es, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3)); + gen_gvec_fn_3(mul, es, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; case 0xa3: fn = &g_vmh[es]; @@ -1791,49 +1791,49 @@ static DisasJumpType op_vm(DisasContext *s, DisasOps *o) g_assert_not_reached(); } - gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), fn); + gen_gvec_3(get_field(s, v1), get_field(s, v2), + get_field(s, v3), fn); return DISAS_NEXT; } static DisasJumpType op_vnn(DisasContext *s, DisasOps *o) { - gen_gvec_fn_3(nand, ES_8, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3)); + gen_gvec_fn_3(nand, ES_8, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vno(DisasContext *s, DisasOps *o) { - gen_gvec_fn_3(nor, ES_8, get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3)); + gen_gvec_fn_3(nor, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vnx(DisasContext *s, DisasOps *o) { - gen_gvec_fn_3(eqv, ES_8, get_field(s->fields, v1), 
get_field(s->fields, v2), - get_field(s->fields, v3)); + gen_gvec_fn_3(eqv, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vo(DisasContext *s, DisasOps *o) { - gen_gvec_fn_3(or, ES_8, get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3)); + gen_gvec_fn_3(or, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_voc(DisasContext *s, DisasOps *o) { - gen_gvec_fn_3(orc, ES_8, get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3)); + gen_gvec_fn_3(orc, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); return DISAS_NEXT; } static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m3); + const uint8_t es = get_field(s, m3); static const GVecGen2 g[4] = { { .fno = gen_helper_gvec_vpopct8, }, { .fno = gen_helper_gvec_vpopct16, }, @@ -1846,7 +1846,7 @@ static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - gen_gvec_2(get_field(s->fields, v1), get_field(s->fields, v2), &g[es]); + gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]); return DISAS_NEXT; } @@ -1870,7 +1870,7 @@ static void gen_rll_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) static DisasJumpType op_verllv(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_verllv8, }, { .fno = gen_helper_gvec_verllv16, }, @@ -1883,14 +1883,14 @@ static DisasJumpType op_verllv(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), &g[es]); + gen_gvec_3(get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); return DISAS_NEXT; } static DisasJumpType op_verll(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); static const GVecGen2s g[4] = { { .fno = gen_helper_gvec_verll8, }, { .fno = gen_helper_gvec_verll16, }, @@ -1902,7 +1902,7 @@ static DisasJumpType op_verll(DisasContext *s, DisasOps *o) gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec_2s(get_field(s->fields, v1), get_field(s->fields, v3), o->addr1, + gen_gvec_2s(get_field(s, v1), get_field(s, v3), o->addr1, &g[es]); return DISAS_NEXT; } @@ -1933,8 +1933,8 @@ static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c) static DisasJumpType op_verim(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m5); - const uint8_t i4 = get_field(s->fields, i4) & + const uint8_t es = get_field(s, m5); + const uint8_t i4 = get_field(s, i4) & (NUM_VEC_ELEMENT_BITS(es) - 1); static const GVecGen3i g[4] = { { .fno = gen_helper_gvec_verim8, }, @@ -1950,24 +1950,24 @@ static DisasJumpType op_verim(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - gen_gvec_3i(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), i4, &g[es]); + gen_gvec_3i(get_field(s, v1), get_field(s, v2), + get_field(s, v3), i4, &g[es]); return DISAS_NEXT; } static DisasJumpType op_vesv(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); - const uint8_t v1 = get_field(s->fields, v1); - const uint8_t v2 = get_field(s->fields, v2); - const uint8_t v3 = get_field(s->fields, v3); + const uint8_t es = get_field(s, m4); + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + 
const uint8_t v3 = get_field(s, v3); if (es > ES_64) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - switch (s->fields->op2) { + switch (s->fields.op2) { case 0x70: gen_gvec_fn_3(shlv, es, v1, v2, v3); break; @@ -1985,11 +1985,11 @@ static DisasJumpType op_vesv(DisasContext *s, DisasOps *o) static DisasJumpType op_ves(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); - const uint8_t d2 = get_field(s->fields, d2) & + const uint8_t es = get_field(s, m4); + const uint8_t d2 = get_field(s, d2) & (NUM_VEC_ELEMENT_BITS(es) - 1); - const uint8_t v1 = get_field(s->fields, v1); - const uint8_t v3 = get_field(s->fields, v3); + const uint8_t v1 = get_field(s, v1); + const uint8_t v3 = get_field(s, v3); TCGv_i32 shift; if (es > ES_64) { @@ -1997,8 +1997,8 @@ static DisasJumpType op_ves(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - if (likely(!get_field(s->fields, b2))) { - switch (s->fields->op2) { + if (likely(!get_field(s, b2))) { + switch (s->fields.op2) { case 0x30: gen_gvec_fn_2i(shli, es, v1, v3, d2); break; @@ -2015,7 +2015,7 @@ static DisasJumpType op_ves(DisasContext *s, DisasOps *o) shift = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(shift, o->addr1); tcg_gen_andi_i32(shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1); - switch (s->fields->op2) { + switch (s->fields.op2) { case 0x30: gen_gvec_fn_2s(shls, es, v1, v3, shift); break; @@ -2037,14 +2037,14 @@ static DisasJumpType op_vsl(DisasContext *s, DisasOps *o) { TCGv_i64 shift = tcg_temp_new_i64(); - read_vec_element_i64(shift, get_field(s->fields, v3), 7, ES_8); - if (s->fields->op2 == 0x74) { + read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); + if (s->fields.op2 == 0x74) { tcg_gen_andi_i64(shift, shift, 0x7); } else { tcg_gen_andi_i64(shift, shift, 0x78); } - gen_gvec_2i_ool(get_field(s->fields, v1), get_field(s->fields, v2), + gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), shift, 0, gen_helper_gvec_vsl); tcg_temp_free_i64(shift); return DISAS_NEXT; @@ -2052,7 +2052,7 @@ static DisasJumpType op_vsl(DisasContext *s, DisasOps *o) static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o) { - const uint8_t i4 = get_field(s->fields, i4) & 0xf; + const uint8_t i4 = get_field(s, i4) & 0xf; const int left_shift = (i4 & 7) * 8; const int right_shift = 64 - left_shift; TCGv_i64 t0 = tcg_temp_new_i64(); @@ -2060,18 +2060,18 @@ static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o) TCGv_i64 t2 = tcg_temp_new_i64(); if ((i4 & 8) == 0) { - read_vec_element_i64(t0, get_field(s->fields, v2), 0, ES_64); - read_vec_element_i64(t1, get_field(s->fields, v2), 1, ES_64); - read_vec_element_i64(t2, get_field(s->fields, v3), 0, ES_64); + read_vec_element_i64(t0, get_field(s, v2), 0, ES_64); + read_vec_element_i64(t1, get_field(s, v2), 1, ES_64); + read_vec_element_i64(t2, get_field(s, v3), 0, ES_64); } else { - read_vec_element_i64(t0, get_field(s->fields, v2), 1, ES_64); - read_vec_element_i64(t1, get_field(s->fields, v3), 0, ES_64); - read_vec_element_i64(t2, get_field(s->fields, v3), 1, ES_64); + read_vec_element_i64(t0, get_field(s, v2), 1, ES_64); + read_vec_element_i64(t1, get_field(s, v3), 0, ES_64); + read_vec_element_i64(t2, get_field(s, v3), 1, ES_64); } tcg_gen_extract2_i64(t0, t1, t0, right_shift); tcg_gen_extract2_i64(t1, t2, t1, right_shift); - write_vec_element_i64(t0, get_field(s->fields, v1), 0, ES_64); - write_vec_element_i64(t1, get_field(s->fields, v1), 1, ES_64); + write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); + write_vec_element_i64(t1, get_field(s, v1), 1, 
ES_64); tcg_temp_free(t0); tcg_temp_free(t1); @@ -2083,14 +2083,14 @@ static DisasJumpType op_vsra(DisasContext *s, DisasOps *o) { TCGv_i64 shift = tcg_temp_new_i64(); - read_vec_element_i64(shift, get_field(s->fields, v3), 7, ES_8); - if (s->fields->op2 == 0x7e) { + read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); + if (s->fields.op2 == 0x7e) { tcg_gen_andi_i64(shift, shift, 0x7); } else { tcg_gen_andi_i64(shift, shift, 0x78); } - gen_gvec_2i_ool(get_field(s->fields, v1), get_field(s->fields, v2), + gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), shift, 0, gen_helper_gvec_vsra); tcg_temp_free_i64(shift); return DISAS_NEXT; @@ -2100,14 +2100,14 @@ static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o) { TCGv_i64 shift = tcg_temp_new_i64(); - read_vec_element_i64(shift, get_field(s->fields, v3), 7, ES_8); - if (s->fields->op2 == 0x7c) { + read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); + if (s->fields.op2 == 0x7c) { tcg_gen_andi_i64(shift, shift, 0x7); } else { tcg_gen_andi_i64(shift, shift, 0x78); } - gen_gvec_2i_ool(get_field(s->fields, v1), get_field(s->fields, v2), + gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), shift, 0, gen_helper_gvec_vsrl); tcg_temp_free_i64(shift); return DISAS_NEXT; @@ -2115,18 +2115,18 @@ static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o) static DisasJumpType op_vs(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); if (es > ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } else if (es == ES_128) { - gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3)); + gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } - gen_gvec_fn_3(sub, es, get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3)); + gen_gvec_fn_3(sub, es, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); return DISAS_NEXT; } @@ -2162,7 +2162,7 @@ static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); static const GVecGen3 g[4] = { { .fno = gen_helper_gvec_vscbi8, }, { .fno = gen_helper_gvec_vscbi16, }, @@ -2174,12 +2174,12 @@ static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o) gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } else if (es == ES_128) { - gen_gvec128_3_i64(gen_scbi2_i64, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3)); + gen_gvec128_3_i64(gen_scbi2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); return DISAS_NEXT; } - gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), &g[es]); + gen_gvec_3(get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); return DISAS_NEXT; } @@ -2198,14 +2198,14 @@ static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o) { - if (get_field(s->fields, m5) != ES_128) { + if (get_field(s, m5) != ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec128_4_i64(gen_sbi2_i64, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3), - get_field(s->fields, v4)); + gen_gvec128_4_i64(gen_sbi2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3), + get_field(s, v4)); return 
DISAS_NEXT; } @@ -2225,20 +2225,20 @@ static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o) { - if (get_field(s->fields, m5) != ES_128) { + if (get_field(s, m5) != ES_128) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - gen_gvec128_4_i64(gen_sbcbi2_i64, get_field(s->fields, v1), - get_field(s->fields, v2), get_field(s->fields, v3), - get_field(s->fields, v4)); + gen_gvec128_4_i64(gen_sbcbi2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3), + get_field(s, v4)); return DISAS_NEXT; } static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); TCGv_i64 sum, tmp; uint8_t dst_idx; @@ -2253,12 +2253,12 @@ static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o) uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2; const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1; - read_vec_element_i64(sum, get_field(s->fields, v3), max_idx, es); + read_vec_element_i64(sum, get_field(s, v3), max_idx, es); for (; idx <= max_idx; idx++) { - read_vec_element_i64(tmp, get_field(s->fields, v2), idx, es); + read_vec_element_i64(tmp, get_field(s, v2), idx, es); tcg_gen_add_i64(sum, sum, tmp); } - write_vec_element_i64(sum, get_field(s->fields, v1), dst_idx, ES_64); + write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64); } tcg_temp_free_i64(sum); tcg_temp_free_i64(tmp); @@ -2267,7 +2267,7 @@ static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o) static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1; TCGv_i64 sumh, suml, zero, tmpl; uint8_t idx; @@ -2282,13 +2282,13 @@ static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o) zero = tcg_const_i64(0); tmpl = tcg_temp_new_i64(); - read_vec_element_i64(suml, get_field(s->fields, v3), max_idx, es); + read_vec_element_i64(suml, get_field(s, v3), max_idx, es); for (idx = 0; idx <= max_idx; idx++) { - read_vec_element_i64(tmpl, get_field(s->fields, v2), idx, es); + read_vec_element_i64(tmpl, get_field(s, v2), idx, es); tcg_gen_add2_i64(suml, sumh, suml, sumh, tmpl, zero); } - write_vec_element_i64(sumh, get_field(s->fields, v1), 0, ES_64); - write_vec_element_i64(suml, get_field(s->fields, v1), 1, ES_64); + write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64); + write_vec_element_i64(suml, get_field(s, v1), 1, ES_64); tcg_temp_free_i64(sumh); tcg_temp_free_i64(suml); @@ -2299,7 +2299,7 @@ static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o) static DisasJumpType op_vsum(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); + const uint8_t es = get_field(s, m4); TCGv_i32 sum, tmp; uint8_t dst_idx; @@ -2314,12 +2314,12 @@ static DisasJumpType op_vsum(DisasContext *s, DisasOps *o) uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4; const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1; - read_vec_element_i32(sum, get_field(s->fields, v3), max_idx, es); + read_vec_element_i32(sum, get_field(s, v3), max_idx, es); for (; idx <= max_idx; idx++) { - read_vec_element_i32(tmp, get_field(s->fields, v2), idx, es); + read_vec_element_i32(tmp, get_field(s, v2), idx, es); tcg_gen_add_i32(sum, sum, tmp); } - write_vec_element_i32(sum, get_field(s->fields, v1), dst_idx, ES_32); + write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32); } tcg_temp_free_i32(sum); 
tcg_temp_free_i32(tmp); @@ -2328,7 +2328,7 @@ static DisasJumpType op_vsum(DisasContext *s, DisasOps *o) static DisasJumpType op_vtm(DisasContext *s, DisasOps *o) { - gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, gen_helper_gvec_vtm); set_cc_static(s); return DISAS_NEXT; @@ -2336,8 +2336,8 @@ static DisasJumpType op_vtm(DisasContext *s, DisasOps *o) static DisasJumpType op_vfae(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); - const uint8_t m5 = get_field(s->fields, m5); + const uint8_t es = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); static gen_helper_gvec_3 * const g[3] = { gen_helper_gvec_vfae8, gen_helper_gvec_vfae16, @@ -2354,20 +2354,20 @@ static DisasJumpType op_vfae(DisasContext *s, DisasOps *o) } if (extract32(m5, 0, 1)) { - gen_gvec_3_ptr(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), cpu_env, m5, g_cc[es]); + gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), + get_field(s, v3), cpu_env, m5, g_cc[es]); set_cc_static(s); } else { - gen_gvec_3_ool(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), m5, g[es]); + gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), + get_field(s, v3), m5, g[es]); } return DISAS_NEXT; } static DisasJumpType op_vfee(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); - const uint8_t m5 = get_field(s->fields, m5); + const uint8_t es = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); static gen_helper_gvec_3 * const g[3] = { gen_helper_gvec_vfee8, gen_helper_gvec_vfee16, @@ -2385,20 +2385,20 @@ static DisasJumpType op_vfee(DisasContext *s, DisasOps *o) } if (extract32(m5, 0, 1)) { - gen_gvec_3_ptr(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), cpu_env, m5, g_cc[es]); + gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), + get_field(s, v3), cpu_env, m5, g_cc[es]); set_cc_static(s); } else { - gen_gvec_3_ool(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), m5, g[es]); + gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), + get_field(s, v3), m5, g[es]); } return DISAS_NEXT; } static DisasJumpType op_vfene(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); - const uint8_t m5 = get_field(s->fields, m5); + const uint8_t es = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); static gen_helper_gvec_3 * const g[3] = { gen_helper_gvec_vfene8, gen_helper_gvec_vfene16, @@ -2416,20 +2416,20 @@ static DisasJumpType op_vfene(DisasContext *s, DisasOps *o) } if (extract32(m5, 0, 1)) { - gen_gvec_3_ptr(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), cpu_env, m5, g_cc[es]); + gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), + get_field(s, v3), cpu_env, m5, g_cc[es]); set_cc_static(s); } else { - gen_gvec_3_ool(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), m5, g[es]); + gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), + get_field(s, v3), m5, g[es]); } return DISAS_NEXT; } static DisasJumpType op_vistr(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m4); - const uint8_t m5 = get_field(s->fields, m5); + const uint8_t es = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); static gen_helper_gvec_2 * const g[3] = { gen_helper_gvec_vistr8, gen_helper_gvec_vistr16, @@ -2447,11 +2447,11 @@ static DisasJumpType op_vistr(DisasContext *s, DisasOps *o) } if 
(extract32(m5, 0, 1)) { - gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, g_cc[es]); set_cc_static(s); } else { - gen_gvec_2_ool(get_field(s->fields, v1), get_field(s->fields, v2), 0, + gen_gvec_2_ool(get_field(s, v1), get_field(s, v2), 0, g[es]); } return DISAS_NEXT; @@ -2459,8 +2459,8 @@ static DisasJumpType op_vistr(DisasContext *s, DisasOps *o) static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o) { - const uint8_t es = get_field(s->fields, m5); - const uint8_t m6 = get_field(s->fields, m6); + const uint8_t es = get_field(s, m5); + const uint8_t m6 = get_field(s, m6); static gen_helper_gvec_4 * const g[3] = { gen_helper_gvec_vstrc8, gen_helper_gvec_vstrc16, @@ -2489,23 +2489,23 @@ static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o) if (extract32(m6, 0, 1)) { if (extract32(m6, 2, 1)) { - gen_gvec_4_ptr(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), get_field(s->fields, v4), + gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), cpu_env, m6, g_cc_rt[es]); } else { - gen_gvec_4_ptr(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), get_field(s->fields, v4), + gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), cpu_env, m6, g_cc[es]); } set_cc_static(s); } else { if (extract32(m6, 2, 1)) { - gen_gvec_4_ool(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), get_field(s->fields, v4), + gen_gvec_4_ool(get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), m6, g_rt[es]); } else { - gen_gvec_4_ool(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), get_field(s->fields, v4), + gen_gvec_4_ool(get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), m6, g[es]); } } @@ -2514,8 +2514,8 @@ static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o) static DisasJumpType op_vfa(DisasContext *s, DisasOps *o) { - const uint8_t fpf = get_field(s->fields, m4); - const uint8_t m5 = get_field(s->fields, m5); + const uint8_t fpf = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); const bool se = extract32(m5, 3, 1); gen_helper_gvec_3_ptr *fn; @@ -2524,7 +2524,7 @@ static DisasJumpType op_vfa(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - switch (s->fields->op2) { + switch (s->fields.op2) { case 0xe3: fn = se ? 
gen_helper_gvec_vfa64s : gen_helper_gvec_vfa64; break; @@ -2540,26 +2540,26 @@ static DisasJumpType op_vfa(DisasContext *s, DisasOps *o) default: g_assert_not_reached(); } - gen_gvec_3_ptr(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), cpu_env, 0, fn); + gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), + get_field(s, v3), cpu_env, 0, fn); return DISAS_NEXT; } static DisasJumpType op_wfc(DisasContext *s, DisasOps *o) { - const uint8_t fpf = get_field(s->fields, m3); - const uint8_t m4 = get_field(s->fields, m4); + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); if (fpf != FPF_LONG || m4) { gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } - if (s->fields->op2 == 0xcb) { - gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), + if (s->fields.op2 == 0xcb) { + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, gen_helper_gvec_wfc64); } else { - gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, gen_helper_gvec_wfk64); } set_cc_static(s); @@ -2568,9 +2568,9 @@ static DisasJumpType op_wfc(DisasContext *s, DisasOps *o) static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) { - const uint8_t fpf = get_field(s->fields, m4); - const uint8_t m5 = get_field(s->fields, m5); - const uint8_t m6 = get_field(s->fields, m6); + const uint8_t fpf = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); + const uint8_t m6 = get_field(s, m6); const bool se = extract32(m5, 3, 1); const bool cs = extract32(m6, 0, 1); gen_helper_gvec_3_ptr *fn; @@ -2581,7 +2581,7 @@ static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) } if (cs) { - switch (s->fields->op2) { + switch (s->fields.op2) { case 0xe8: fn = se ? gen_helper_gvec_vfce64s_cc : gen_helper_gvec_vfce64_cc; break; @@ -2595,7 +2595,7 @@ static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) g_assert_not_reached(); } } else { - switch (s->fields->op2) { + switch (s->fields.op2) { case 0xe8: fn = se ? gen_helper_gvec_vfce64s : gen_helper_gvec_vfce64; break; @@ -2609,8 +2609,8 @@ static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) g_assert_not_reached(); } } - gen_gvec_3_ptr(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), cpu_env, 0, fn); + gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), + get_field(s, v3), cpu_env, 0, fn); if (cs) { set_cc_static(s); } @@ -2619,9 +2619,9 @@ static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o) { - const uint8_t fpf = get_field(s->fields, m3); - const uint8_t m4 = get_field(s->fields, m4); - const uint8_t erm = get_field(s->fields, m5); + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); + const uint8_t erm = get_field(s, m5); const bool se = extract32(m4, 3, 1); gen_helper_gvec_2_ptr *fn; @@ -2630,7 +2630,7 @@ static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - switch (s->fields->op2) { + switch (s->fields.op2) { case 0xc3: fn = se ? 
gen_helper_gvec_vcdg64s : gen_helper_gvec_vcdg64; break; @@ -2652,15 +2652,15 @@ static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o) default: g_assert_not_reached(); } - gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), cpu_env, + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, deposit32(m4, 4, 4, erm), fn); return DISAS_NEXT; } static DisasJumpType op_vfll(DisasContext *s, DisasOps *o) { - const uint8_t fpf = get_field(s->fields, m3); - const uint8_t m4 = get_field(s->fields, m4); + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfll32; if (fpf != FPF_SHORT || extract32(m4, 0, 3)) { @@ -2671,15 +2671,15 @@ static DisasJumpType op_vfll(DisasContext *s, DisasOps *o) if (extract32(m4, 3, 1)) { fn = gen_helper_gvec_vfll32s; } - gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), cpu_env, + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn); return DISAS_NEXT; } static DisasJumpType op_vfma(DisasContext *s, DisasOps *o) { - const uint8_t m5 = get_field(s->fields, m5); - const uint8_t fpf = get_field(s->fields, m6); + const uint8_t m5 = get_field(s, m5); + const uint8_t fpf = get_field(s, m6); const bool se = extract32(m5, 3, 1); gen_helper_gvec_4_ptr *fn; @@ -2688,24 +2688,24 @@ static DisasJumpType op_vfma(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } - if (s->fields->op2 == 0x8f) { + if (s->fields.op2 == 0x8f) { fn = se ? gen_helper_gvec_vfma64s : gen_helper_gvec_vfma64; } else { fn = se ? gen_helper_gvec_vfms64s : gen_helper_gvec_vfms64; } - gen_gvec_4_ptr(get_field(s->fields, v1), get_field(s->fields, v2), - get_field(s->fields, v3), get_field(s->fields, v4), cpu_env, + gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), cpu_env, 0, fn); return DISAS_NEXT; } static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o) { - const uint8_t v1 = get_field(s->fields, v1); - const uint8_t v2 = get_field(s->fields, v2); - const uint8_t fpf = get_field(s->fields, m3); - const uint8_t m4 = get_field(s->fields, m4); - const uint8_t m5 = get_field(s->fields, m5); + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); TCGv_i64 tmp; if (fpf != FPF_LONG || extract32(m4, 0, 3) || m5 > 2) { @@ -2753,8 +2753,8 @@ static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o) static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o) { - const uint8_t fpf = get_field(s->fields, m3); - const uint8_t m4 = get_field(s->fields, m4); + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfsq64; if (fpf != FPF_LONG || extract32(m4, 0, 3)) { @@ -2765,16 +2765,16 @@ static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o) if (extract32(m4, 3, 1)) { fn = gen_helper_gvec_vfsq64s; } - gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), cpu_env, + gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn); return DISAS_NEXT; } static DisasJumpType op_vftci(DisasContext *s, DisasOps *o) { - const uint16_t i3 = get_field(s->fields, i3); - const uint8_t fpf = get_field(s->fields, m4); - const uint8_t m5 = get_field(s->fields, m5); + const uint16_t i3 = get_field(s, i3); + const uint8_t fpf = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vftci64; if (fpf != 
FPF_LONG || extract32(m5, 0, 3)) {
@@ -2785,8 +2785,7 @@ static DisasJumpType op_vftci(DisasContext *s, DisasOps *o)
     if (extract32(m5, 3, 1)) {
         fn = gen_helper_gvec_vftci64s;
     }
-    gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), cpu_env,
-                   i3, fn);
+    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, i3, fn);
     set_cc_static(s);
     return DISAS_NEXT;
 }
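
All of the hunks above apply one mechanical transformation: the decoded instruction fields are now reached through the translation context itself, so call sites shrink from get_field(s->fields, X) to get_field(s, X), and the second opcode byte is read as s->fields.op2 (an embedded struct member) rather than s->fields->op2 (a pointer dereference). The stand-alone C sketch below only illustrates the shape of that change; the type and helper names used here (Fields, Context, fields_get, ctx_get_field, FLD_*) are simplified placeholders for illustration, not QEMU's actual definitions.

/*
 * Minimal sketch, under the assumption of simplified stand-in types:
 * the per-instruction field storage is embedded in the translation
 * context by value, and the accessor takes the context pointer directly.
 */
#include <stdint.h>
#include <stdio.h>

enum { FLD_V1, FLD_V2, FLD_V3, FLD_M4, FLD_NUM };

typedef struct {
    uint8_t op2;            /* second opcode byte of the decoded insn */
    uint8_t val[FLD_NUM];   /* decoded operand fields */
} Fields;

typedef struct {
    /* ...other translator state would live here... */
    Fields fields;          /* embedded by value, not reached via a pointer */
} Context;

/* Old style: the helper takes a pointer to the fields themselves. */
static uint8_t fields_get(const Fields *f, int which)
{
    return f->val[which];
}

/* New style: the helper takes the context and looks inside it. */
static uint8_t ctx_get_field(const Context *s, int which)
{
    return s->fields.val[which];
}

/*
 * Handlers such as op_vmx in the diff also dispatch on the second opcode
 * byte; 0xff is the "maximum" case shown above, other values are elided.
 */
static const char *dispatch(const Context *s)
{
    switch (s->fields.op2) {
    case 0xff:
        return "maximum";
    default:
        return "other";
    }
}

int main(void)
{
    Context s = { .fields = { .op2 = 0xff, .val = { 1, 2, 3, 3 } } };

    /* Call sites change from get_field(s->fields, v1) ... */
    uint8_t v1_old = fields_get(&s.fields, FLD_V1);
    /* ... to get_field(s, v1); op2 is now spelled s->fields.op2. */
    uint8_t v1_new = ctx_get_field(&s, FLD_V1);

    printf("op2=%#x (%s) v1=%u/%u\n", s.fields.op2, dispatch(&s),
           v1_old, v1_new);
    return v1_old == v1_new ? 0 : 1;
}

Compiled on its own (for instance with gcc -std=c99 against a hypothetical sketch.c), the program reads the same field value through both accessors, which is all the refactoring in the hunks above changes: the spelling at the call sites, not the behaviour of the generated code.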