Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/cpu.h                     |   2
-rw-r--r--  target-ppc/excp_helper.c             |   4
-rw-r--r--  target-ppc/helper.h                  |  19
-rw-r--r--  target-ppc/helper_regs.h             |  25
-rw-r--r--  target-ppc/int_helper.c              | 119
-rw-r--r--  target-ppc/kvm.c                     |  35
-rw-r--r--  target-ppc/kvm_ppc.h                 |   5
-rw-r--r--  target-ppc/mmu-hash64.c              |   6
-rw-r--r--  target-ppc/mmu_helper.c              |  25
-rw-r--r--  target-ppc/translate.c               | 421
-rw-r--r--  target-ppc/translate/fp-impl.inc.c   |  84
-rw-r--r--  target-ppc/translate/fp-ops.inc.c    |   2
-rw-r--r--  target-ppc/translate/spe-impl.inc.c  |   4
-rw-r--r--  target-ppc/translate/vmx-impl.inc.c  | 104
-rw-r--r--  target-ppc/translate/vmx-ops.inc.c   |  38
-rw-r--r--  target-ppc/translate/vsx-impl.inc.c  |  39
-rw-r--r--  target-ppc/translate/vsx-ops.inc.c   |   9
17 files changed, 627 insertions, 314 deletions
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 9617481196..1c90adb5d7 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -1009,6 +1009,8 @@ struct CPUPPCState {
bool tlb_dirty; /* Set to non-zero when modifying TLB */
bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */
uint32_t tlb_need_flush; /* Delayed flush needed */
+#define TLB_NEED_LOCAL_FLUSH 0x1
+#define TLB_NEED_GLOBAL_FLUSH 0x2
#endif
/* Other registers */
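The two new flag bits turn tlb_need_flush into a small bitmask rather than a boolean: local and global flush requests accumulate and are serviced independently. A minimal standalone sketch (plain C, outside QEMU) of the intended set/test/clear discipline:

    #include <assert.h>
    #include <stdint.h>

    #define TLB_NEED_LOCAL_FLUSH  0x1
    #define TLB_NEED_GLOBAL_FLUSH 0x2

    int main(void)
    {
        uint32_t tlb_need_flush = 0;

        tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;   /* e.g. slbie */
        tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;  /* e.g. tlbie */

        /* A context-synchronizing event (interrupt, rfi, isync)
         * services only the local request... */
        tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        assert(tlb_need_flush == TLB_NEED_GLOBAL_FLUSH);

        /* ...while ptesync (and BookE tlbsync) also services the
         * global request by broadcasting to the other CPUs. */
        tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
        assert(tlb_need_flush == 0);
        return 0;
    }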
diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c
index 04ed4da1f4..921c39d33f 100644
--- a/target-ppc/excp_helper.c
+++ b/target-ppc/excp_helper.c
@@ -711,7 +711,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* Any interrupt is context synchronizing, check if TCG TLB
* needs a delayed flush on ppc64
*/
- check_tlb_flush(env);
+ check_tlb_flush(env, false);
}
void ppc_cpu_do_interrupt(CPUState *cs)
@@ -973,7 +973,7 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
/* Context synchronizing: check if TCG TLB needs flush */
- check_tlb_flush(env);
+ check_tlb_flush(env, false);
}
void helper_rfi(CPUPPCState *env)
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index dcf3f950b6..a1c29628bd 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -18,7 +18,8 @@ DEF_HELPER_1(rfid, void, env)
DEF_HELPER_1(hrfid, void, env)
DEF_HELPER_2(store_lpcr, void, env, tl)
#endif
-DEF_HELPER_1(check_tlb_flush, void, env)
+DEF_HELPER_1(check_tlb_flush_local, void, env)
+DEF_HELPER_1(check_tlb_flush_global, void, env)
#endif
DEF_HELPER_3(lmw, void, env, tl, i32)
@@ -50,6 +51,8 @@ DEF_HELPER_FLAGS_1(cnttzd, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_1(popcntd, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_3(srad, tl, env, tl, tl)
+DEF_HELPER_0(darn32, tl)
+DEF_HELPER_0(darn64, tl)
#endif
DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32)
@@ -250,6 +253,14 @@ DEF_HELPER_2(vspltisw, void, avr, i32)
DEF_HELPER_3(vspltb, void, avr, avr, i32)
DEF_HELPER_3(vsplth, void, avr, avr, i32)
DEF_HELPER_3(vspltw, void, avr, avr, i32)
+DEF_HELPER_3(vextractub, void, avr, avr, i32)
+DEF_HELPER_3(vextractuh, void, avr, avr, i32)
+DEF_HELPER_3(vextractuw, void, avr, avr, i32)
+DEF_HELPER_3(vextractd, void, avr, avr, i32)
+DEF_HELPER_3(vinsertb, void, avr, avr, i32)
+DEF_HELPER_3(vinserth, void, avr, avr, i32)
+DEF_HELPER_3(vinsertw, void, avr, avr, i32)
+DEF_HELPER_3(vinsertd, void, avr, avr, i32)
DEF_HELPER_2(vupkhpx, void, avr, avr)
DEF_HELPER_2(vupklpx, void, avr, avr)
DEF_HELPER_2(vupkhsb, void, avr, avr)
@@ -262,6 +273,7 @@ DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr)
DEF_HELPER_4(vpkshss, void, env, avr, avr, avr)
DEF_HELPER_4(vpkshus, void, env, avr, avr, avr)
DEF_HELPER_4(vpkswss, void, env, avr, avr, avr)
@@ -317,10 +329,15 @@ DEF_HELPER_2(vclzb, void, avr, avr)
DEF_HELPER_2(vclzh, void, avr, avr)
DEF_HELPER_2(vclzw, void, avr, avr)
DEF_HELPER_2(vclzd, void, avr, avr)
+DEF_HELPER_2(vctzb, void, avr, avr)
+DEF_HELPER_2(vctzh, void, avr, avr)
+DEF_HELPER_2(vctzw, void, avr, avr)
+DEF_HELPER_2(vctzd, void, avr, avr)
DEF_HELPER_2(vpopcntb, void, avr, avr)
DEF_HELPER_2(vpopcnth, void, avr, avr)
DEF_HELPER_2(vpopcntw, void, avr, avr)
DEF_HELPER_2(vpopcntd, void, avr, avr)
+DEF_HELPER_3(vbpermd, void, avr, avr, avr)
DEF_HELPER_3(vbpermq, void, avr, avr, avr)
DEF_HELPER_2(vgbbd, void, avr, avr)
DEF_HELPER_3(vpmsumb, void, avr, avr, avr)
diff --git a/target-ppc/helper_regs.h b/target-ppc/helper_regs.h
index 3d279f1d8a..bb9ce60436 100644
--- a/target-ppc/helper_regs.h
+++ b/target-ppc/helper_regs.h
@@ -154,16 +154,33 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
}
#if !defined(CONFIG_USER_ONLY)
-static inline void check_tlb_flush(CPUPPCState *env)
+static inline void check_tlb_flush(CPUPPCState *env, bool global)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
- if (env->tlb_need_flush) {
- env->tlb_need_flush = 0;
+ if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
tlb_flush(cs, 1);
+ env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
+ }
+
+ /* Propagate TLB invalidations to other CPUs when the guest uses broadcast
+ * TLB invalidation instructions.
+ */
+ if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
+ CPUState *other_cs;
+ CPU_FOREACH(other_cs) {
+ if (other_cs != cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(other_cs);
+ CPUPPCState *other_env = &cpu->env;
+
+ other_env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
+ tlb_flush(other_cs, 1);
+ }
+ }
+ env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
}
}
#else
-static inline void check_tlb_flush(CPUPPCState *env) { }
+static inline void check_tlb_flush(CPUPPCState *env, bool global) { }
#endif
#endif /* HELPER_REGS_H */
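The subtlety in the global path above is that a broadcast flush also satisfies whatever local flush a remote CPU had pending, so its TLB_NEED_LOCAL_FLUSH bit is cleared before flushing it. A cut-down model of that logic (an array of states stands in for CPU_FOREACH; not QEMU API):

    #include <stdbool.h>
    #include <stdint.h>

    #define TLB_NEED_LOCAL_FLUSH  0x1
    #define TLB_NEED_GLOBAL_FLUSH 0x2

    struct cpu { uint32_t tlb_need_flush; int flushes; };

    static void flush(struct cpu *c) { c->flushes++; }

    static void model_check_tlb_flush(struct cpu *cpus, int n, int self,
                                      bool global)
    {
        struct cpu *cs = &cpus[self];

        if (cs->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
            flush(cs);
            cs->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        }
        if (global && (cs->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
            for (int i = 0; i < n; i++) {
                if (i != self) {
                    /* the broadcast subsumes the remote CPU's pending
                     * local flush, so drop that bit as well */
                    cpus[i].tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
                    flush(&cpus[i]);
                }
            }
            cs->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
        }
    }

    int main(void)
    {
        struct cpu cpus[2] = {
            { TLB_NEED_GLOBAL_FLUSH, 0 }, { TLB_NEED_LOCAL_FLUSH, 0 }
        };
        model_check_tlb_flush(cpus, 2, 0, true);
        /* cpu 1 was flushed once and its pending local request is gone */
        return (cpus[1].flushes == 1 && cpus[1].tlb_need_flush == 0) ? 0 : 1;
    }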
diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
index 552b2e041b..51a9ac5182 100644
--- a/target-ppc/int_helper.c
+++ b/target-ppc/int_helper.c
@@ -182,6 +182,22 @@ target_ulong helper_cnttzd(target_ulong t)
{
return ctz64(t);
}
+
+/* Return an invalid random number.
+ *
+ * FIXME: Add an RNG backend or other mechanism to get cryptographically
+ * suitable random numbers.
+ */
+target_ulong helper_darn32(void)
+{
+ return -1;
+}
+
+target_ulong helper_darn64(void)
+{
+ return -1;
+}
+
#endif
#if defined(TARGET_PPC64)
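Until that FIXME is resolved, darn always reports failure. A hedged sketch of what a host-backed implementation could look like on Linux, using getrandom(2) — the function name is illustrative, not QEMU API:

    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/random.h>

    /* Hypothetical backend for helper_darn64(): on any host failure,
     * return the architected "no random number available" value
     * (all ones), which is what the stub above returns today. */
    static uint64_t darn64_backend(void)
    {
        uint64_t v;

        if (getrandom(&v, sizeof(v), 0) != (ssize_t)sizeof(v)) {
            return UINT64_MAX;
        }
        return v;
    }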
@@ -1126,14 +1142,57 @@ void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
*r = result;
}
+void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
+ ppc_avr_t *c)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ int s = c->u8[i] & 0x1f;
+#if defined(HOST_WORDS_BIGENDIAN)
+ int index = 15 - (s & 0xf);
+#else
+ int index = s & 0xf;
+#endif
+
+ if (s & 0x10) {
+ result.u8[i] = a->u8[index];
+ } else {
+ result.u8[i] = b->u8[index];
+ }
+ }
+ *r = result;
+}
+
#if defined(HOST_WORDS_BIGENDIAN)
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)])
+#define VBPERMD_INDEX(i) (i)
#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
+#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], 63 - index, 1))
#else
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15-(i)])
+#define VBPERMD_INDEX(i) (1 - i)
#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
+#define EXTRACT_BIT(avr, i, index) \
+ (extract64((avr)->u64[1 - i], 63 - index, 1))
#endif
+void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ ppc_avr_t result = { .u64 = { 0, 0 } };
+ VECTOR_FOR_INORDER_I(i, u64) {
+ for (j = 0; j < 8; j++) {
+ int index = VBPERMQ_INDEX(b, (i * 8) + j);
+ if (index < 64 && EXTRACT_BIT(a, i, index)) {
+ result.u64[VBPERMD_INDEX(i)] |= (0x80 >> j);
+ }
+ }
+ }
+ *r = result;
+}
+
void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int i;
@@ -1792,6 +1851,51 @@ VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VINSERT(suffix, element) \
+ void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])], \
+ sizeof(r->element[0])); \
+ }
+#else
+#define VINSERT(suffix, element) \
+ void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t d = (16 - index) - sizeof(r->element[0]); \
+ memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \
+ }
+#endif
+VINSERT(b, u8)
+VINSERT(h, u16)
+VINSERT(w, u32)
+VINSERT(d, u64)
+#undef VINSERT
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VEXTRACT(suffix, element) \
+ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t es = sizeof(r->element[0]); \
+ memmove(&r->u8[8 - es], &b->u8[index], es); \
+ memset(&r->u8[8], 0, 8); \
+ memset(&r->u8[0], 0, 8 - es); \
+ }
+#else
+#define VEXTRACT(suffix, element) \
+ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t es = sizeof(r->element[0]); \
+ uint32_t s = (16 - index) - es; \
+ memmove(&r->u8[8], &b->u8[s], es); \
+ memset(&r->u8[0], 0, 8); \
+ memset(&r->u8[8 + es], 0, 8 - es); \
+ }
+#endif
+VEXTRACT(ub, u8)
+VEXTRACT(uh, u16)
+VEXTRACT(uw, u32)
+VEXTRACT(d, u64)
+#undef VEXTRACT
#define VSPLTI(suffix, element, splat_type) \
void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat) \
@@ -2038,6 +2142,21 @@ VGENERIC_DO(clzd, u64)
#undef clzw
#undef clzd
+#define ctzb(v) ((v) ? ctz32(v) : 8)
+#define ctzh(v) ((v) ? ctz32(v) : 16)
+#define ctzw(v) ctz32((v))
+#define ctzd(v) ctz64((v))
+
+VGENERIC_DO(ctzb, u8)
+VGENERIC_DO(ctzh, u16)
+VGENERIC_DO(ctzw, u32)
+VGENERIC_DO(ctzd, u64)
+
+#undef ctzb
+#undef ctzh
+#undef ctzw
+#undef ctzd
+
#define popcntb(v) ctpop8(v)
#define popcnth(v) ctpop16(v)
#define popcntw(v) ctpop32(v)
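For reference, a host-endian-independent restatement of what helper_vbpermd computes per doubleword, with the operands as plain uint64_t values (this sidesteps the VBPERMD_INDEX/EXTRACT_BIT byte-order games; bit indices use IBM numbering, bit 0 being the MSB):

    #include <stdint.h>

    /* Each of the eight selector bytes (taken MSB-first) names a bit
     * 0..63 of 'a'; the selected bits are gathered, in order, into the
     * low byte of the result.  Selectors >= 64 contribute a 0 bit. */
    static uint64_t vbpermd_dword(uint64_t a, uint64_t b)
    {
        uint64_t r = 0;
        int j;

        for (j = 0; j < 8; j++) {
            unsigned idx = (b >> (56 - 8 * j)) & 0xff;
            if (idx < 64) {
                r |= ((a >> (63 - idx)) & 1) << (7 - j);
            }
        }
        return r;
    }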
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index dcb68b9081..a18d4d5654 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -36,6 +36,7 @@
#include "hw/sysbus.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
+#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/ppc.h"
#include "sysemu/watchdog.h"
#include "trace.h"
@@ -427,6 +428,7 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
CPUPPCState *env = &cpu->env;
long rampagesize;
int iq, ik, jq, jk;
+ bool has_64k_pages = false;
/* We only handle page sizes for 64-bit server guests for now */
if (!(env->mmu_model & POWERPC_MMU_64)) {
@@ -470,6 +472,9 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
ksps->enc[jk].page_shift)) {
continue;
}
+ if (ksps->enc[jk].page_shift == 16) {
+ has_64k_pages = true;
+ }
qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
@@ -484,6 +489,9 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
env->mmu_model &= ~POWERPC_MMU_1TSEG;
}
+ if (!has_64k_pages) {
+ env->mmu_model &= ~POWERPC_MMU_64K;
+ }
}
#else /* defined (TARGET_PPC64) */
@@ -2055,6 +2063,12 @@ void kvmppc_enable_set_mode_hcall(void)
kvmppc_enable_hcall(kvm_state, H_SET_MODE);
}
+void kvmppc_enable_clear_ref_mod_hcalls(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
+}
+
void kvmppc_set_papr(PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
@@ -2364,19 +2378,6 @@ PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
return pvr_pcc;
}
-#if defined(TARGET_PPC64)
-static void spapr_cpu_core_host_initfn(Object *obj)
-{
- sPAPRCPUCore *core = SPAPR_CPU_CORE(obj);
- char *name = g_strdup_printf("%s-" TYPE_POWERPC_CPU, "host");
- ObjectClass *oc = object_class_by_name(name);
-
- g_assert(oc);
- g_free((void *)name);
- core->cpu_class = oc;
-}
-#endif
-
static int kvm_ppc_register_host_cpu_type(void)
{
TypeInfo type_info = {
@@ -2404,14 +2405,16 @@ static int kvm_ppc_register_host_cpu_type(void)
#if defined(TARGET_PPC64)
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
type_info.parent = TYPE_SPAPR_CPU_CORE,
- type_info.instance_size = sizeof(sPAPRCPUCore),
- type_info.instance_init = spapr_cpu_core_host_initfn,
- type_info.class_init = NULL;
+ type_info.instance_size = sizeof(sPAPRCPUCore);
+ type_info.instance_init = NULL;
+ type_info.class_init = spapr_cpu_core_class_init;
+ type_info.class_data = (void *) "host";
type_register(&type_info);
g_free((void *)type_info.name);
/* Register generic spapr CPU family class for current host CPU type */
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, dc->desc);
+ type_info.class_data = (void *) dc->desc;
type_register(&type_info);
g_free((void *)type_info.name);
#endif
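The host CPU core type is now registered through a shared class_init parameterized by class_data ("host", or dc->desc for the family alias) instead of a dedicated instance_init per type. A plain-C stand-in for that idiom (schematic only, not the QOM or spapr API):

    #include <stdio.h>

    /* One class-init routine serves many registered types; the per-type
     * string arrives through the class_data pointer supplied at
     * registration time. */
    typedef struct { const char *cpu_model; } CoreClass;

    static void core_class_init(CoreClass *k, void *data)
    {
        k->cpu_model = data;
    }

    int main(void)
    {
        CoreClass host, family;

        core_class_init(&host, (void *)"host");
        core_class_init(&family, (void *)"POWER8");   /* e.g. dc->desc */
        printf("%s / %s\n", host.cpu_model, family.cpu_model);
        return 0;
    }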
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index 5461d1082c..a778184567 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -24,6 +24,7 @@ int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
void kvmppc_enable_logical_ci_hcalls(void);
void kvmppc_enable_set_mode_hcall(void);
+void kvmppc_enable_clear_ref_mod_hcalls(void);
void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version);
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
@@ -113,6 +114,10 @@ static inline void kvmppc_enable_set_mode_hcall(void)
{
}
+static inline void kvmppc_enable_clear_ref_mod_hcalls(void)
+{
+}
+
static inline void kvmppc_set_papr(PowerPCCPU *cpu)
{
}
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 8118143874..fdb7a787bf 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -110,7 +110,7 @@ void helper_slbia(CPUPPCState *env)
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
}
}
}
@@ -132,7 +132,7 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
}
}
@@ -912,7 +912,7 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
* invalidate, and we still don't have a tlb_flush_mask(env, n,
* mask) in QEMU, we just invalidate all TLBs
*/
- tlb_flush(CPU(cpu), 1);
+ cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}
void ppc_hash64_update_rmls(CPUPPCState *env)
diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c
index 696bb03ab5..d09fc0a85f 100644
--- a/target-ppc/mmu_helper.c
+++ b/target-ppc/mmu_helper.c
@@ -1965,7 +1965,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
* we just mark the TLB to be flushed later (context synchronizing
* event or sync instruction on 32-bit).
*/
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
break;
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
@@ -1979,7 +1979,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
* and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
* we just invalidate all TLBs
*/
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
break;
#endif /* defined(TARGET_PPC64) */
default:
@@ -2065,7 +2065,7 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
}
}
#else
- env->tlb_need_flush = 1;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
}
}
@@ -2757,7 +2757,7 @@ static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ CPUState *cs;
if (address & 0x4) {
/* flush all entries */
@@ -2774,11 +2774,15 @@ void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
if (address & 0x8) {
/* flush TLB1 entries */
booke206_invalidate_ea_tlb(env, 1, address);
- tlb_flush(CPU(cpu), 1);
+ CPU_FOREACH(cs) {
+ tlb_flush(cs, 1);
+ }
} else {
/* flush TLB0 entries */
booke206_invalidate_ea_tlb(env, 0, address);
- tlb_flush_page(CPU(cpu), address & MAS2_EPN_MASK);
+ CPU_FOREACH(cs) {
+ tlb_flush_page(cs, address & MAS2_EPN_MASK);
+ }
}
}
@@ -2867,9 +2871,14 @@ void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
}
-void helper_check_tlb_flush(CPUPPCState *env)
+void helper_check_tlb_flush_local(CPUPPCState *env)
+{
+ check_tlb_flush(env, false);
+}
+
+void helper_check_tlb_flush_global(CPUPPCState *env)
{
- check_tlb_flush(env);
+ check_tlb_flush(env, true);
}
/*****************************************************************************/
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 618334ae51..8eefd8231d 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -498,6 +498,8 @@ EXTRACT_HELPER(UIMM, 0, 16);
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits unsigned immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
+/* 4 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM4, 16, 4);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
@@ -526,6 +528,10 @@ EXTRACT_HELPER(FPW, 16, 1);
/* addpcis */
EXTRACT_HELPER_DXFORM(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0)
+#if defined(TARGET_PPC64)
+/* darn */
+EXTRACT_HELPER(L, 16, 2);
+#endif
/*** Jump target decoding ***/
/* Immediate address */
@@ -589,6 +595,8 @@ EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
+EXTRACT_HELPER(IMM8, 11, 8);
+
/*****************************************************************************/
/* PowerPC instructions table */
@@ -1891,6 +1899,21 @@ static void gen_cnttzd(DisasContext *ctx)
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
}
+
+/* darn */
+static void gen_darn(DisasContext *ctx)
+{
+ int l = L(ctx->opcode);
+
+ if (l == 0) {
+ gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
+ } else if (l <= 2) {
+ /* Return 64-bit random for both CRN and RRN */
+ gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
+ } else {
+ tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
+ }
+}
#endif
/*** Integer rotate ***/
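On the decode side, EXTRACT_HELPER(L, 16, 2) above should expand, per that macro's definition earlier in this file, to the function below; gen_darn then maps L = 0 to the 32-bit form, L = 1 or 2 (CRN/RRN) to the 64-bit helper, and the reserved L = 3 to the all-ones error value:

    /* Presumed expansion of EXTRACT_HELPER(L, 16, 2): the two-bit L
     * field at bits 16..17 of the 32-bit opcode. */
    static inline uint32_t L(uint32_t opcode)
    {
        return (opcode >> 16) & ((1 << 2) - 1);
    }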
@@ -2460,87 +2483,75 @@ static inline void gen_align_no_le(DisasContext *ctx)
}
/*** Integer load ***/
-static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
-}
+#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
+#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
-static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
+#define GEN_QEMU_LOAD_TL(ldop, op) \
+static void glue(gen_qemu_, ldop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op); \
}
-static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
+GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
-static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
-static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
-{
- TCGv tmp = tcg_temp_new();
- gen_qemu_ld32u(ctx, tmp, addr);
- tcg_gen_extu_tl_i64(val, tmp);
- tcg_temp_free(tmp);
+#define GEN_QEMU_LOAD_64(ldop, op) \
+static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op); \
}
-static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
+GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
-static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
-{
- TCGv tmp = tcg_temp_new();
- gen_qemu_ld32s(ctx, tmp, addr);
- tcg_gen_ext_tl_i64(val, tmp);
- tcg_temp_free(tmp);
-}
+#if defined(TARGET_PPC64)
+GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
+#endif
-static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
+#define GEN_QEMU_STORE_TL(stop, op) \
+static void glue(gen_qemu_, stop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op); \
}
-static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
-}
+GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB))
+GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
-static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
+GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
-static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
+#define GEN_QEMU_STORE_64(stop, op) \
+static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op); \
}
-static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
-{
- TCGv tmp = tcg_temp_new();
- tcg_gen_trunc_i64_tl(tmp, val);
- gen_qemu_st32(ctx, tmp, addr);
- tcg_temp_free(tmp);
-}
+GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
+GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
-static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
- tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
-}
+#if defined(TARGET_PPC64)
+GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
+#endif
#define GEN_LD(name, ldop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
@@ -2628,12 +2639,12 @@ GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
-GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
+GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
/* ldx */
-GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
+GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);
/* CI load/store variants */
-GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
@@ -2656,7 +2667,7 @@ static void gen_ld(DisasContext *ctx)
gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
} else {
/* ld - ldu */
- gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
}
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
@@ -2693,16 +2704,16 @@ static void gen_lq(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x0F);
- /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
- 64-bit byteswap already. */
+ /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
+ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
} else {
- gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
}
tcg_temp_free(EA);
}
@@ -2785,9 +2796,9 @@ GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
@@ -2824,16 +2835,16 @@ static void gen_std(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
- /* We only need to swap high and low halves. gen_qemu_st64 does
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
} else {
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
}
tcg_temp_free(EA);
} else {
@@ -2847,7 +2858,7 @@ static void gen_std(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
- gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
tcg_temp_free(EA);
@@ -2857,57 +2868,23 @@ static void gen_std(DisasContext *ctx)
/*** Integer load and store with byte reverse ***/
/* lhbrx */
-static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
/* lwbrx */
-static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* ldbrx */
-static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
-}
-GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
+/* stdbrx */
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif /* TARGET_PPC64 */
/* sthbrx */
-static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
-
/* stwbrx */
-static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
-}
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
-#if defined(TARGET_PPC64)
-/* stdbrx */
-static inline void gen_qemu_st64r(DisasContext *ctx, TCGv arg1, TCGv arg2)
-{
- TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
- tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
-}
-GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
-#endif /* TARGET_PPC64 */
-
/*** Integer load and store multiple ***/
/* lmw */
@@ -3064,7 +3041,7 @@ static void gen_eieio(DisasContext *ctx)
}
#if !defined(CONFIG_USER_ONLY)
-static inline void gen_check_tlb_flush(DisasContext *ctx)
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
TCGv_i32 t;
TCGLabel *l;
@@ -3076,12 +3053,16 @@ static inline void gen_check_tlb_flush(DisasContext *ctx)
t = tcg_temp_new_i32();
tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
- gen_helper_check_tlb_flush(cpu_env);
+ if (global) {
+ gen_helper_check_tlb_flush_global(cpu_env);
+ } else {
+ gen_helper_check_tlb_flush_local(cpu_env);
+ }
gen_set_label(l);
tcg_temp_free_i32(t);
}
#else
-static inline void gen_check_tlb_flush(DisasContext *ctx) { }
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
/* isync */
@@ -3092,49 +3073,51 @@ static void gen_isync(DisasContext *ctx)
* kernel mode however so check MSR_PR
*/
if (!ctx->pr) {
- gen_check_tlb_flush(ctx);
+ gen_check_tlb_flush(ctx, false);
}
gen_stop_exception(ctx);
}
-#define LARX(name, len, loadop) \
+#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
+
+#define LARX(name, memop) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv t0; \
TCGv gpr = cpu_gpr[rD(ctx->opcode)]; \
+ int len = MEMOP_GET_SIZE(memop); \
gen_set_access_type(ctx, ACCESS_RES); \
t0 = tcg_temp_local_new(); \
gen_addr_reg_index(ctx, t0); \
if ((len) > 1) { \
gen_check_align(ctx, t0, (len)-1); \
} \
- gen_qemu_##loadop(ctx, gpr, t0); \
+ tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop); \
tcg_gen_mov_tl(cpu_reserve, t0); \
tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)); \
tcg_temp_free(t0); \
}
/* lwarx */
-LARX(lbarx, 1, ld8u);
-LARX(lharx, 2, ld16u);
-LARX(lwarx, 4, ld32u);
-
+LARX(lbarx, DEF_MEMOP(MO_UB))
+LARX(lharx, DEF_MEMOP(MO_UW))
+LARX(lwarx, DEF_MEMOP(MO_UL))
#if defined(CONFIG_USER_ONLY)
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
- int reg, int size)
+ int reg, int memop)
{
TCGv t0 = tcg_temp_new();
tcg_gen_st_tl(EA, cpu_env, offsetof(CPUPPCState, reserve_ea));
- tcg_gen_movi_tl(t0, (size << 5) | reg);
+ tcg_gen_movi_tl(t0, (MEMOP_GET_SIZE(memop) << 5) | reg);
tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, reserve_info));
tcg_temp_free(t0);
gen_exception_err(ctx, POWERPC_EXCP_STCX, 0);
}
#else
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
- int reg, int size)
+ int reg, int memop)
{
TCGLabel *l1;
@@ -3142,65 +3125,36 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
l1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
-#if defined(TARGET_PPC64)
- if (size == 8) {
- gen_qemu_st64(ctx, cpu_gpr[reg], EA);
- } else
-#endif
- if (size == 4) {
- gen_qemu_st32(ctx, cpu_gpr[reg], EA);
- } else if (size == 2) {
- gen_qemu_st16(ctx, cpu_gpr[reg], EA);
-#if defined(TARGET_PPC64)
- } else if (size == 16) {
- TCGv gpr1, gpr2 , EA8;
- if (unlikely(ctx->le_mode)) {
- gpr1 = cpu_gpr[reg+1];
- gpr2 = cpu_gpr[reg];
- } else {
- gpr1 = cpu_gpr[reg];
- gpr2 = cpu_gpr[reg+1];
- }
- gen_qemu_st64(ctx, gpr1, EA);
- EA8 = tcg_temp_local_new();
- gen_addr_add(ctx, EA8, EA, 8);
- gen_qemu_st64(ctx, gpr2, EA8);
- tcg_temp_free(EA8);
-#endif
- } else {
- gen_qemu_st8(ctx, cpu_gpr[reg], EA);
- }
+ tcg_gen_qemu_st_tl(cpu_gpr[reg], EA, ctx->mem_idx, memop);
gen_set_label(l1);
tcg_gen_movi_tl(cpu_reserve, -1);
}
#endif
-#define STCX(name, len) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv t0; \
- if (unlikely((len == 16) && (rD(ctx->opcode) & 1))) { \
- gen_inval_exception(ctx, \
- POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_RES); \
- t0 = tcg_temp_local_new(); \
- gen_addr_reg_index(ctx, t0); \
- if (len > 1) { \
- gen_check_align(ctx, t0, (len)-1); \
- } \
- gen_conditional_store(ctx, t0, rS(ctx->opcode), len); \
- tcg_temp_free(t0); \
-}
-
-STCX(stbcx_, 1);
-STCX(sthcx_, 2);
-STCX(stwcx_, 4);
+#define STCX(name, memop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv t0; \
+ int len = MEMOP_GET_SIZE(memop); \
+ gen_set_access_type(ctx, ACCESS_RES); \
+ t0 = tcg_temp_local_new(); \
+ gen_addr_reg_index(ctx, t0); \
+ if (len > 1) { \
+ gen_check_align(ctx, t0, (len) - 1); \
+ } \
+ gen_conditional_store(ctx, t0, rS(ctx->opcode), memop); \
+ tcg_temp_free(t0); \
+}
+
+STCX(stbcx_, DEF_MEMOP(MO_UB))
+STCX(sthcx_, DEF_MEMOP(MO_UW))
+STCX(stwcx_, DEF_MEMOP(MO_UL))
#if defined(TARGET_PPC64)
/* ldarx */
-LARX(ldarx, 8, ld64);
+LARX(ldarx, DEF_MEMOP(MO_Q))
+/* stdcx. */
+STCX(stdcx_, DEF_MEMOP(MO_Q))
/* lqarx */
static void gen_lqarx(DisasContext *ctx)
@@ -3226,21 +3180,63 @@ static void gen_lqarx(DisasContext *ctx)
gpr1 = cpu_gpr[rd];
gpr2 = cpu_gpr[rd+1];
}
- gen_qemu_ld64(ctx, gpr1, EA);
+ tcg_gen_qemu_ld_i64(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
tcg_gen_mov_tl(cpu_reserve, EA);
-
gen_addr_add(ctx, EA, EA, 8);
- gen_qemu_ld64(ctx, gpr2, EA);
+ tcg_gen_qemu_ld_i64(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
tcg_gen_st_tl(gpr1, cpu_env, offsetof(CPUPPCState, reserve_val));
tcg_gen_st_tl(gpr2, cpu_env, offsetof(CPUPPCState, reserve_val2));
+ tcg_temp_free(EA);
+}
+
+/* stqcx. */
+static void gen_stqcx_(DisasContext *ctx)
+{
+ TCGv EA;
+ int reg = rS(ctx->opcode);
+ int len = 16;
+#if !defined(CONFIG_USER_ONLY)
+ TCGLabel *l1;
+ TCGv gpr1, gpr2;
+#endif
+
+ if (unlikely((rD(ctx->opcode) & 1))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_RES);
+ EA = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, EA);
+ if (len > 1) {
+ gen_check_align(ctx, EA, (len) - 1);
+ }
+
+#if defined(CONFIG_USER_ONLY)
+ gen_conditional_store(ctx, EA, reg, 16);
+#else
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ if (unlikely(ctx->le_mode)) {
+ gpr1 = cpu_gpr[reg + 1];
+ gpr2 = cpu_gpr[reg];
+ } else {
+ gpr1 = cpu_gpr[reg];
+ gpr2 = cpu_gpr[reg + 1];
+ }
+ tcg_gen_qemu_st_tl(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+ gen_addr_add(ctx, EA, EA, 8);
+ tcg_gen_qemu_st_tl(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+#endif
tcg_temp_free(EA);
}
-/* stdcx. */
-STCX(stdcx_, 8);
-STCX(stqcx_, 16);
#endif /* defined(TARGET_PPC64) */
/* sync */
@@ -3257,7 +3253,7 @@ static void gen_sync(DisasContext *ctx)
* check MSR_PR as well.
*/
if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
- gen_check_tlb_flush(ctx);
+ gen_check_tlb_flush(ctx, true);
}
}
@@ -3585,10 +3581,13 @@ static void gen_rfi(DisasContext *ctx)
#if defined(CONFIG_USER_ONLY)
GEN_PRIV;
#else
- /* FIXME: This instruction doesn't exist anymore on 64-bit server
- * processors compliant with arch 2.x, we should remove it there,
- * but we need to fix OpenBIOS not to use it on 970 first
+ /* This instruction doesn't exist anymore on 64-bit server
+ * processors compliant with arch 2.x
*/
+ if (ctx->insns_flags & PPC_SEGMENT_64B) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
/* Restore CPU state */
CHK_SV;
gen_update_cfar(ctx, ctx->nip - 4);
@@ -4442,6 +4441,7 @@ static void gen_tlbie(DisasContext *ctx)
#if defined(CONFIG_USER_ONLY)
GEN_PRIV;
#else
+ TCGv_i32 t1;
CHK_HV;
if (NARROW_MODE(ctx)) {
@@ -4452,6 +4452,11 @@ static void gen_tlbie(DisasContext *ctx)
} else {
gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
}
+ t1 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
+ tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_temp_free_i32(t1);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -4463,11 +4468,10 @@ static void gen_tlbsync(DisasContext *ctx)
#else
CHK_HV;
- /* tlbsync is a nop for server, ptesync handles delayed tlb flush,
- * embedded however needs to deal with tlbsync. We don't try to be
- * fancy and swallow the overhead of checking for both.
- */
- gen_check_tlb_flush(ctx);
+ /* BookS does both ptesync and tlbsync, so make tlbsync a nop for server */
+ if (ctx->insns_flags & PPC_BOOKE) {
+ gen_check_tlb_flush(ctx, true);
+ }
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6240,6 +6244,7 @@ GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
#endif
@@ -6614,12 +6619,12 @@ GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER)
#if defined(TARGET_PPC64)
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B)
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B)
-GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B)
-GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B)
-GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
+GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B)
+GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B)
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
/* HV/P7 and later only */
-GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
@@ -6650,10 +6655,10 @@ GEN_STS(stb, st8, 0x06, PPC_INTEGER)
GEN_STS(sth, st16, 0x0C, PPC_INTEGER)
GEN_STS(stw, st32, 0x04, PPC_INTEGER)
#if defined(TARGET_PPC64)
-GEN_STUX(std, st64, 0x15, 0x05, PPC_64B)
-GEN_STX(std, st64, 0x15, 0x04, PPC_64B)
-GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
-GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B)
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B)
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
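A note on the memop machinery that drives the rewritten load/store generators above: DEF_MEMOP ORs in the guest's default byte-order mask, BSWAP_MEMOP XORs MO_BSWAP into it for the byte-reversed (lhbrx-style) accesses, and MEMOP_GET_SIZE recovers the access width from the low bits, which TCG encodes as log2 of the size in bytes. A standalone check of that arithmetic (the MO_* values model TCG's encoding):

    #include <assert.h>

    /* Model of the TCGMemOp size encoding: the low two bits are
     * log2(access size in bytes) and MO_SIZE masks them. */
    enum { MO_UB = 0, MO_UW = 1, MO_UL = 2, MO_Q = 3, MO_SIZE = 3 };

    #define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))

    int main(void)
    {
        assert(MEMOP_GET_SIZE(MO_UB) == 1);
        assert(MEMOP_GET_SIZE(MO_UW) == 2);
        assert(MEMOP_GET_SIZE(MO_UL) == 4);
        assert(MEMOP_GET_SIZE(MO_Q)  == 8);
        return 0;
    }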
diff --git a/target-ppc/translate/fp-impl.inc.c b/target-ppc/translate/fp-impl.inc.c
index 9ba9289157..872af7b56f 100644
--- a/target-ppc/translate/fp-impl.inc.c
+++ b/target-ppc/translate/fp-impl.inc.c
@@ -672,7 +672,7 @@ static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
}
/* lfd lfdu lfdux lfdx */
-GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT);
+GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT);
/* lfs lfsu lfsux lfsx */
GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
@@ -687,16 +687,16 @@ static void gen_lfdp(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0);
- /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
- 64-bit byteswap already. */
+ /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
+ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@@ -712,16 +712,16 @@ static void gen_lfdpx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- /* We only need to swap high and low halves. gen_qemu_ld64 does necessary
- 64-bit byteswap already. */
+ /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
+ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@@ -848,7 +848,7 @@ static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
}
/* stfd stfdu stfdux stfdx */
-GEN_STFS(stfd, st64, 0x16, PPC_FLOAT);
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
/* stfs stfsu stfsux stfsx */
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
@@ -863,16 +863,16 @@ static void gen_stfdp(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0);
- /* We only need to swap high and low halves. gen_qemu_st64 does necessary
- 64-bit byteswap already. */
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does
+ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@@ -888,16 +888,16 @@ static void gen_stfdpx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- /* We only need to swap high and low halves. gen_qemu_st64 does necessary
- 64-bit byteswap already. */
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does
+ necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@@ -924,9 +924,9 @@ static void gen_lfq(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
@@ -940,9 +940,9 @@ static void gen_lfqu(DisasContext *ctx)
t0 = tcg_temp_new();
t1 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
@@ -958,10 +958,10 @@ static void gen_lfqux(DisasContext *ctx)
TCGv t0, t1;
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
t1 = tcg_temp_new();
gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
@@ -976,9 +976,9 @@ static void gen_lfqx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
@@ -990,9 +990,9 @@ static void gen_stfq(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
@@ -1005,10 +1005,10 @@ static void gen_stfqu(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
t1 = tcg_temp_new();
gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
@@ -1024,10 +1024,10 @@ static void gen_stfqux(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
t1 = tcg_temp_new();
gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
@@ -1042,9 +1042,9 @@ static void gen_stfqx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
diff --git a/target-ppc/translate/fp-ops.inc.c b/target-ppc/translate/fp-ops.inc.c
index 291a1e62d0..d36ab4ecc3 100644
--- a/target-ppc/translate/fp-ops.inc.c
+++ b/target-ppc/translate/fp-ops.inc.c
@@ -85,7 +85,7 @@ GEN_STUF(name, stop, op | 0x21, type) \
GEN_STUXF(name, stop, op | 0x01, type) \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
-GEN_STFS(stfd, st64, 0x16, PPC_FLOAT)
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT)
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
GEN_HANDLER_E(stfdp, 0x3D, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),
diff --git a/target-ppc/translate/spe-impl.inc.c b/target-ppc/translate/spe-impl.inc.c
index 0ce403a861..8c1c16c63e 100644
--- a/target-ppc/translate/spe-impl.inc.c
+++ b/target-ppc/translate/spe-impl.inc.c
@@ -617,7 +617,7 @@ static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh)
static inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
{
TCGv_i64 t0 = tcg_temp_new_i64();
- gen_qemu_ld64(ctx, t0, addr);
+ gen_qemu_ld64_i64(ctx, t0, addr);
gen_store_gpr64(rD(ctx->opcode), t0);
tcg_temp_free_i64(t0);
}
@@ -725,7 +725,7 @@ static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
{
TCGv_i64 t0 = tcg_temp_new_i64();
gen_load_gpr64(t0, rS(ctx->opcode));
- gen_qemu_st64(ctx, t0, addr);
+ gen_qemu_st64_i64(ctx, t0, addr);
tcg_temp_free_i64(t0);
}
diff --git a/target-ppc/translate/vmx-impl.inc.c b/target-ppc/translate/vmx-impl.inc.c
index b9841222ea..3ce374d891 100644
--- a/target-ppc/translate/vmx-impl.inc.c
+++ b/target-ppc/translate/vmx-impl.inc.c
@@ -26,16 +26,16 @@ static void glue(gen_, name)(DisasContext *ctx)
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
- /* We only need to swap high and low halves. gen_qemu_ld64 does necessary \
- 64-bit byteswap already. */ \
+ /* We only need to swap high and low halves. gen_qemu_ld64_i64 does \
+ necessary 64-bit byteswap already. */ \
if (ctx->le_mode) { \
- gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \
- gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \
tcg_temp_free(EA); \
}
@@ -52,16 +52,16 @@ static void gen_st##name(DisasContext *ctx) \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
- /* We only need to swap high and low halves. gen_qemu_st64 does necessary \
- 64-bit byteswap already. */ \
+ /* We only need to swap high and low halves. gen_qemu_st64_i64 does \
+ necessary 64-bit byteswap already. */ \
if (ctx->le_mode) { \
- gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \
- gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \
tcg_temp_free(EA); \
}
@@ -569,6 +569,21 @@ static void glue(gen_, name)(DisasContext *ctx) \
tcg_temp_free_ptr(rd); \
}
+#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
GEN_VXFORM_NOA(vupkhsb, 7, 8);
GEN_VXFORM_NOA(vupkhsh, 7, 9);
GEN_VXFORM_NOA(vupkhsw, 7, 25);
@@ -639,13 +654,55 @@ static void glue(gen_, name)(DisasContext *ctx) \
tcg_temp_free_ptr(rd); \
}
+#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ uint8_t uimm = UIMM4(ctx->opcode); \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ if (uimm > splat_max) { \
+ uimm = 0; \
+ } \
+ tcg_gen_movi_i32(t0, uimm); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb, t0); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
GEN_VXFORM_UIMM(vspltb, 6, 8);
GEN_VXFORM_UIMM(vsplth, 6, 9);
GEN_VXFORM_UIMM(vspltw, 6, 10);
+GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
+GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
+GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
+GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
+GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15);
+GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14);
+GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12);
+GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8);
GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
+GEN_VXFORM_DUAL(vspltb, PPC_NONE, PPC2_ALTIVEC_207,
+ vextractub, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vsplth, PPC_NONE, PPC2_ALTIVEC_207,
+ vextractuh, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltw, PPC_NONE, PPC2_ALTIVEC_207,
+ vextractuw, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltisb, PPC_NONE, PPC2_ALTIVEC_207,
+ vinsertb, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltish, PPC_NONE, PPC2_ALTIVEC_207,
+ vinserth, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltisw, PPC_NONE, PPC2_ALTIVEC_207,
+ vinsertw, PPC_NONE, PPC2_ISA300);
static void gen_vsldoi(DisasContext *ctx)
{
@@ -709,6 +766,24 @@ static void gen_vmladduhm(DisasContext *ctx)
tcg_temp_free_ptr(rd);
}
+static void gen_vpermr(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rc, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rc = gen_avr_ptr(rC(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vpermr(cpu_env, rd, ra, rb, rc);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rc);
+ tcg_temp_free_ptr(rd);
+}
+
GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
@@ -719,6 +794,10 @@ GEN_VXFORM_NOA(vclzb, 1, 28)
GEN_VXFORM_NOA(vclzh, 1, 29)
GEN_VXFORM_NOA(vclzw, 1, 30)
GEN_VXFORM_NOA(vclzd, 1, 31)
+GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
+GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
+GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
+GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
GEN_VXFORM_NOA(vpopcntb, 1, 28)
GEN_VXFORM_NOA(vpopcnth, 1, 29)
GEN_VXFORM_NOA(vpopcntw, 1, 30)
@@ -731,6 +810,7 @@ GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vbpermd, 6, 23);
GEN_VXFORM(vbpermq, 6, 21);
GEN_VXFORM_NOA(vgbbd, 6, 20);
GEN_VXFORM(vpmsumb, 4, 16)
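The splat_max argument of GEN_VXFORM_UIMM_SPLAT is 16 minus the element size in bytes (hence 15/14/12/8 for the b/h/w/d variants above): the largest UIM for which the element still lies entirely inside the 16-byte vector register. A one-function model of the clamp, with es standing for the element size:

    /* Model of the UIM clamp in GEN_VXFORM_UIMM_SPLAT: out-of-range
     * immediates are forced to 0 before reaching the helper. */
    static unsigned clamp_uimm(unsigned uimm, unsigned es)
    {
        return (uimm > 16 - es) ? 0 : uimm;
    }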
diff --git a/target-ppc/translate/vmx-ops.inc.c b/target-ppc/translate/vmx-ops.inc.c
index 2a9f225e6a..a7022a0de5 100644
--- a/target-ppc/translate/vmx-ops.inc.c
+++ b/target-ppc/translate/vmx-ops.inc.c
@@ -41,6 +41,13 @@ GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
#define GEN_VXFORM_300(name, opc2, opc3) \
GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300)
+#define GEN_VXFORM_300_EXT(name, opc2, opc3, inval) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, inval, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VXFORM_300_EO(name, opc2, opc3, opc4) \
+GEN_HANDLER_E_2(name, 0x04, opc2, opc3, opc4, 0x00000000, PPC_NONE, \
+ PPC2_ISA300)
+
#define GEN_VXFORM_DUAL(name0, name1, opc2, opc3, type0, type1) \
GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, type0, type1)
@@ -191,11 +198,28 @@ GEN_VXRFORM(vcmpgefp, 3, 7)
GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE)
GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE)
-#define GEN_VXFORM_SIMM(name, opc2, opc3) \
- GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
-GEN_VXFORM_SIMM(vspltisb, 6, 12),
-GEN_VXFORM_SIMM(vspltish, 6, 13),
-GEN_VXFORM_SIMM(vspltisw, 6, 14),
+#define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \
+GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \
+ PPC_NONE)
+GEN_VXFORM_DUAL_INV(vspltb, vextractub, 6, 8, 0x00000000, 0x100000,
+ PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000,
+ PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000,
+ PPC2_ALTIVEC_207),
+GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000),
+GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000,
+ PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000,
+ PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000,
+ PPC2_ALTIVEC_207),
+GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000),
+GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C),
+GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D),
+GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E),
+GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F),
+GEN_VXFORM_300(vpermr, 0x1D, 0xFF),
#define GEN_VXFORM_NOA(name, opc2, opc3) \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC)
@@ -218,9 +242,6 @@ GEN_VXFORM_NOA(vrfiz, 5, 9),
#define GEN_VXFORM_UIMM(name, opc2, opc3) \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
-GEN_VXFORM_UIMM(vspltb, 6, 8),
-GEN_VXFORM_UIMM(vsplth, 6, 9),
-GEN_VXFORM_UIMM(vspltw, 6, 10),
GEN_VXFORM_UIMM(vcfux, 5, 12),
GEN_VXFORM_UIMM(vcfsx, 5, 13),
GEN_VXFORM_UIMM(vctuxs, 5, 14),
@@ -241,6 +262,7 @@ GEN_VXFORM_DUAL(vclzh, vpopcnth, 1, 29, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL(vclzw, vpopcntw, 1, 30, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL(vclzd, vpopcntd, 1, 31, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_300(vbpermd, 6, 23),
GEN_VXFORM_207(vbpermq, 6, 21),
GEN_VXFORM_207(vgbbd, 6, 20),
GEN_VXFORM_207(vpmsumb, 4, 16),
diff --git a/target-ppc/translate/vsx-impl.inc.c b/target-ppc/translate/vsx-impl.inc.c
index 9f77b06bd8..eee6052d03 100644
--- a/target-ppc/translate/vsx-impl.inc.c
+++ b/target-ppc/translate/vsx-impl.inc.c
@@ -34,8 +34,10 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free(EA); \
}
-VSX_LOAD_SCALAR(lxsdx, ld64)
+VSX_LOAD_SCALAR(lxsdx, ld64_i64)
VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
+VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
+VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
VSX_LOAD_SCALAR(lxsspx, ld32fs)
@@ -49,9 +51,9 @@ static void gen_lxvd2x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
+ gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
tcg_temp_free(EA);
}
@@ -65,7 +67,7 @@ static void gen_lxvdsx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
tcg_temp_free(EA);
}
@@ -115,7 +117,10 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free(EA); \
}
-VSX_STORE_SCALAR(stxsdx, st64)
+VSX_STORE_SCALAR(stxsdx, st64_i64)
+
+VSX_STORE_SCALAR(stxsibx, st8_i64)
+VSX_STORE_SCALAR(stxsihx, st16_i64)
VSX_STORE_SCALAR(stxsiwx, st32_i64)
VSX_STORE_SCALAR(stxsspx, st32fs)
@@ -129,9 +134,9 @@ static void gen_stxvd2x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- gen_qemu_st64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
+ gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
+ gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
tcg_temp_free(EA);
}
@@ -647,6 +652,26 @@ static void gen_xxspltw(DisasContext *ctx)
tcg_temp_free_i64(b2);
}
+#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
+
+static void gen_xxspltib(DisasContext *ctx)
+{
+ unsigned char uim8 = IMM8(ctx->opcode);
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ }
+ tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), pattern(uim8));
+ tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), pattern(uim8));
+}
+
static void gen_xxsldwi(DisasContext *ctx)
{
TCGv_i64 xth, xtl;
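Regarding the pattern() macro used by gen_xxspltib above: ~(uint64_t)0 / 0xff is 0x0101010101010101, so multiplying it by the immediate byte replicates that byte across all eight lanes of each doubleword. A standalone check:

    #include <assert.h>
    #include <stdint.h>

    #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))

    int main(void)
    {
        assert(~(uint64_t)0 / 0xff == UINT64_C(0x0101010101010101));
        assert(pattern(0xab) == UINT64_C(0xabababababababab));
        assert(pattern(0) == 0);
        return 0;
    }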
diff --git a/target-ppc/translate/vsx-ops.inc.c b/target-ppc/translate/vsx-ops.inc.c
index 8b9da65444..414b73bd10 100644
--- a/target-ppc/translate/vsx-ops.inc.c
+++ b/target-ppc/translate/vsx-ops.inc.c
@@ -1,12 +1,16 @@
GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxsiwax, 0x1F, 0x0C, 0x02, 0, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(lxsiwzx, 0x1F, 0x0C, 0x00, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxsibzx, 0x1F, 0x0D, 0x18, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxsihzx, 0x1F, 0x0D, 0x19, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(lxsspx, 0x1F, 0x0C, 0x10, 0, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxsibx, 0x1F, 0xD, 0x1C, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxsihx, 0x1F, 0xD, 0x1D, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stxsiwx, 0x1F, 0xC, 0x04, 0, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(stxsspx, 0x1F, 0xC, 0x14, 0, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
@@ -20,6 +24,10 @@ GEN_HANDLER_E(mfvsrd, 0x1F, 0x13, 0x01, 0x0000F800, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(mtvsrd, 0x1F, 0x13, 0x05, 0x0000F800, PPC_NONE, PPC2_VSX207),
#endif
+#define GEN_XX1FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
+
#define GEN_XX2FORM(name, opc2, opc3, fl2) \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
@@ -222,6 +230,7 @@ VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207),
GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX),
+GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300),
GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
#define GEN_XXSEL_ROW(opc3) \