-rw-r--r--  accel/tcg/cpu-exec-common.c   |  30
-rw-r--r--  include/exec/cpu-common.h     |   1
-rw-r--r--  include/exec/cpu_ldst.h       |   4
-rw-r--r--  migration/dirtyrate.c         |  10
-rw-r--r--  softmmu/dirtylimit.c          |  26
-rw-r--r--  softmmu/physmem.c             |  50
-rw-r--r--  tcg/sparc64/tcg-target.c.inc  |  11
-rw-r--r--  tcg/sparc64/tcg-target.h      |   2
-rw-r--r--  tcg/tcg-op-vec.c              |   7
9 files changed, 63 insertions(+), 78 deletions(-)
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index 9a5fabf625..7e35d7f4b5 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -33,36 +33,6 @@ void cpu_loop_exit_noexc(CPUState *cpu)
cpu_loop_exit(cpu);
}
-#if defined(CONFIG_SOFTMMU)
-void cpu_reloading_memory_map(void)
-{
- if (qemu_in_vcpu_thread() && current_cpu->running) {
- /* The guest can in theory prolong the RCU critical section as long
- * as it feels like. The major problem with this is that because it
- * can do multiple reconfigurations of the memory map within the
- * critical section, we could potentially accumulate an unbounded
- * collection of memory data structures awaiting reclamation.
- *
- * Because the only thing we're currently protecting with RCU is the
- * memory data structures, it's sufficient to break the critical section
- * in this callback, which we know will get called every time the
- * memory map is rearranged.
- *
- * (If we add anything else in the system that uses RCU to protect
- * its data structures, we will need to implement some other mechanism
- * to force TCG CPUs to exit the critical section, at which point this
- * part of this callback might become unnecessary.)
- *
- * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
- * only protects cpu->as->dispatch. Since we know our caller is about
- * to reload it, it's safe to split the critical section.
- */
- rcu_read_unlock();
- rcu_read_lock();
- }
-}
-#endif
-
void cpu_loop_exit(CPUState *cpu)
{
/* Undo the setting in cpu_tb_exec. */
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 87dc9a752c..41788c0bdd 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -133,7 +133,6 @@ static inline void cpu_physical_memory_write(hwaddr addr,
{
cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
-void cpu_reloading_memory_map(void);
void *cpu_physical_memory_map(hwaddr addr,
hwaddr *plen,
bool is_write);
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index f3ce4eb1d0..da10ba1433 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -121,8 +121,8 @@ static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
h2g_nocheck(x); \
})
#else
-typedef vaddr abi_ptr;
-#define TARGET_ABI_FMT_ptr "%016" VADDR_PRIx
+typedef target_ulong abi_ptr;
+#define TARGET_ABI_FMT_ptr TARGET_FMT_lx
#endif
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 84f1b0fb20..bccb3515e3 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -57,6 +57,8 @@ static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
msec = current_time - initial_time;
} else {
g_usleep((msec + initial_time - current_time) * 1000);
+ /* g_usleep may overshoot */
+ msec = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - initial_time;
}
return msec;
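As a rough illustration of why the elapsed time is re-read after sleeping: g_usleep() only guarantees a minimum delay, so a caller that later divides by the interval must use the time that actually passed. The helper below is a hypothetical sketch, not part of the patch; it assumes QEMU's qemu_clock_get_ms().

static int64_t sleep_and_measure_ms(int64_t want_ms)
{
    int64_t start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    g_usleep(want_ms * 1000);                               /* may overshoot */
    return qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start;  /* >= want_ms */
}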
@@ -77,9 +79,13 @@ static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
{
uint64_t increased_dirty_pages =
dirty_pages.end_pages - dirty_pages.start_pages;
- uint64_t memory_size_MiB = qemu_target_pages_to_MiB(increased_dirty_pages);
- return memory_size_MiB * 1000 / calc_time_ms;
+ /*
+ * multiply by 1000ms/s _before_ converting down to megabytes
+ * to avoid losing precision
+ */
+ return qemu_target_pages_to_MiB(increased_dirty_pages * 1000) /
+ calc_time_ms;
}
void global_dirty_log_change(unsigned int flag, bool start)
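A worked example of the reordering in do_calculate_dirtyrate(), assuming 4 KiB target pages (256 pages per MiB) and integer division throughout; the numbers are illustrative only:

    500 dirty pages over 500 ms is really 1.95 MiB / 0.5 s, i.e. about 3.9 MB/s

    old order: qemu_target_pages_to_MiB(500) * 1000 / 500  ->  1 * 1000 / 500 == 2
    new order: qemu_target_pages_to_MiB(500 * 1000) / 500  ->  1953 / 500     == 3

Converting to MiB last keeps the sub-MiB remainder in the intermediate value, so the reported rate stays closer to the true one.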
diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index 3c275ee55b..fa959d7743 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -100,7 +100,7 @@ static void vcpu_dirty_rate_stat_collect(void)
stat.rates[i].dirty_rate;
}
- free(stat.rates);
+ g_free(stat.rates);
}
static void *vcpu_dirty_rate_stat_thread(void *opaque)
@@ -171,10 +171,10 @@ void vcpu_dirty_rate_stat_initialize(void)
void vcpu_dirty_rate_stat_finalize(void)
{
- free(vcpu_dirty_rate_stat->stat.rates);
+ g_free(vcpu_dirty_rate_stat->stat.rates);
vcpu_dirty_rate_stat->stat.rates = NULL;
- free(vcpu_dirty_rate_stat);
+ g_free(vcpu_dirty_rate_stat);
vcpu_dirty_rate_stat = NULL;
}
@@ -220,10 +220,10 @@ void dirtylimit_state_initialize(void)
void dirtylimit_state_finalize(void)
{
- free(dirtylimit_state->states);
+ g_free(dirtylimit_state->states);
dirtylimit_state->states = NULL;
- free(dirtylimit_state);
+ g_free(dirtylimit_state);
dirtylimit_state = NULL;
trace_dirtylimit_state_finalize();
@@ -653,7 +653,8 @@ struct DirtyLimitInfoList *qmp_query_vcpu_dirty_limit(Error **errp)
void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict)
{
- DirtyLimitInfoList *limit, *head, *info = NULL;
+ DirtyLimitInfoList *info;
+ g_autoptr(DirtyLimitInfoList) head = NULL;
Error *err = NULL;
if (!dirtylimit_in_service()) {
@@ -661,20 +662,17 @@ void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict)
return;
}
- info = qmp_query_vcpu_dirty_limit(&err);
+ head = qmp_query_vcpu_dirty_limit(&err);
if (err) {
hmp_handle_error(mon, err);
return;
}
- head = info;
- for (limit = head; limit != NULL; limit = limit->next) {
+ for (info = head; info != NULL; info = info->next) {
monitor_printf(mon, "vcpu[%"PRIi64"], limit rate %"PRIi64 " (MB/s),"
" current rate %"PRIi64 " (MB/s)\n",
- limit->value->cpu_index,
- limit->value->limit_rate,
- limit->value->current_rate);
+ info->value->cpu_index,
+ info->value->limit_rate,
+ info->value->current_rate);
}
-
- g_free(info);
}
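For reference, a minimal self-contained GLib sketch of the g_autoptr() idiom adopted above (Foo, foo_free and use_foo are hypothetical names, not QEMU code; the QAPI-generated DirtyLimitInfoList already has such a cleanup function registered for it). The cleanup function runs when the variable goes out of scope, so the early-return path needs no explicit free.

#include <glib.h>

typedef struct Foo {
    int refused;
} Foo;

static void foo_free(Foo *f)
{
    g_free(f);
}
G_DEFINE_AUTOPTR_CLEANUP_FUNC(Foo, foo_free)

static int use_foo(void)
{
    g_autoptr(Foo) f = g_new0(Foo, 1);

    if (f->refused) {
        return -1;      /* foo_free(f) runs here automatically... */
    }
    return 0;           /* ...and here */
}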
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 3df73542e1..18277ddd67 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -680,8 +680,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
IOMMUTLBEntry iotlb;
int iommu_idx;
hwaddr addr = orig_addr;
- AddressSpaceDispatch *d =
- qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+ AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
for (;;) {
section = address_space_translate_internal(d, addr, &addr, plen, false);
@@ -2412,10 +2411,16 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
- AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
- MemoryRegionSection *sections = d->map.sections;
+ AddressSpaceDispatch *d = cpuas->memory_dispatch;
+ int section_index = index & ~TARGET_PAGE_MASK;
+ MemoryRegionSection *ret;
+
+ assert(section_index < d->map.sections_nb);
+ ret = d->map.sections + section_index;
+ assert(ret->mr);
+ assert(ret->mr->ops);
- return &sections[index & ~TARGET_PAGE_MASK];
+ return ret;
}
static void io_mem_init(void)
@@ -2481,23 +2486,42 @@ static void tcg_log_global_after_sync(MemoryListener *listener)
}
}
+static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
+{
+ CPUAddressSpace *cpuas = data.host_ptr;
+
+ cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
+ tlb_flush(cpu);
+}
+
static void tcg_commit(MemoryListener *listener)
{
CPUAddressSpace *cpuas;
- AddressSpaceDispatch *d;
+ CPUState *cpu;
assert(tcg_enabled());
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
- cpu_reloading_memory_map();
- /* The CPU and TLB are protected by the iothread lock.
- * We reload the dispatch pointer now because cpu_reloading_memory_map()
- * may have split the RCU critical section.
+ cpu = cpuas->cpu;
+
+ /*
+ * Defer changes to as->memory_dispatch until the cpu is quiescent.
+ * Otherwise we race between (1) other cpu threads and (2) ongoing
+ * i/o for the current cpu thread, with data cached by mmu_lookup().
+ *
+ * In addition, queueing the work function will kick the cpu back to
+ * the main loop, which will end the RCU critical section and reclaim
+ * the memory data structures.
+ *
+ * That said, the listener is also called during realize, before
+ * all of the tcg machinery for run-on is initialized: thus halt_cond.
*/
- d = address_space_to_dispatch(cpuas->as);
- qatomic_rcu_set(&cpuas->memory_dispatch, d);
- tlb_flush(cpuas->cpu);
+ if (cpu->halt_cond) {
+ async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+ } else {
+ tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+ }
}
static void memory_map_init(void)
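A hypothetical sketch of the async_run_on_cpu() pattern that tcg_commit() now relies on (DemoState, demo_update_on_cpu and demo_schedule are invented names): the queued function runs on the target vCPU's thread once it has left the TCG execution loop, so it can safely replace state the vCPU might otherwise be dereferencing.

typedef struct DemoState {
    void *table;
} DemoState;

static void demo_update_on_cpu(CPUState *cpu, run_on_cpu_data data)
{
    DemoState *s = data.host_ptr;

    s->table = NULL;   /* the vCPU is quiescent here; a plain store suffices */
    tlb_flush(cpu);
}

static void demo_schedule(CPUState *cpu, DemoState *s)
{
    async_run_on_cpu(cpu, demo_update_on_cpu, RUN_ON_CPU_HOST_PTR(s));
}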
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index f2a346a1bd..81a08bb6c5 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -529,11 +529,6 @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
tcg_out_ext32u(s, rd, rs);
}
-static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
-{
- tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
-}
-
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
return false;
@@ -1444,9 +1439,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_divu_i64:
c = ARITH_UDIVX;
goto gen_arith;
- case INDEX_op_extrh_i64_i32:
- tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
- break;
case INDEX_op_brcond_i64:
tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
@@ -1501,7 +1493,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
default:
g_assert_not_reached();
}
@@ -1533,8 +1524,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extrh_i64_i32:
case INDEX_op_qemu_ld_a32_i32:
case INDEX_op_qemu_ld_a64_i32:
case INDEX_op_qemu_ld_a32_i64:
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index 3d41c9659b..5cfc4b4679 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -115,7 +115,7 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0
-#define TCG_TARGET_HAS_extr_i64_i32 1
+#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_rot_i64 0
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index ad8ee08a7e..094298bb27 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -391,12 +391,11 @@ static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
- const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
-
- if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
+ if (TCG_TARGET_HAS_not_vec) {
+ vec_gen_op2(INDEX_op_not_vec, 0, r, a);
+ } else {
tcg_gen_xor_vec(0, r, a, tcg_constant_vec_matching(r, 0, -1));
}
- tcg_swap_vecop_list(hold_list);
}
void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
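The fallback branch in tcg_gen_not_vec() relies on the bitwise identity ~x == x ^ ~0, with tcg_constant_vec_matching(r, 0, -1) supplying the all-ones vector. In plain host C (illustrative only, unrelated to TCG internals):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t x = 0x00ff00ff00ff00ffULL;

    assert(~x == (x ^ UINT64_MAX));   /* NOT is XOR with all bits set */
    return 0;
}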