aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--cpu-exec.c2
-rw-r--r--exec-all.h2
-rw-r--r--exec.c12
-rw-r--r--gen-icount.h2
-rw-r--r--target-arm/translate.c2
-rw-r--r--target-cris/translate.c2
-rw-r--r--target-m68k/translate.c2
-rw-r--r--target-mips/translate.c4
-rw-r--r--vl.c8
9 files changed, 18 insertions, 18 deletions
diff --git a/cpu-exec.c b/cpu-exec.c
index 08e10f4322..6b46bd2c98 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -620,7 +620,7 @@ int cpu_exec(CPUState *env1)
next_tb = tcg_qemu_tb_exec(tc_ptr);
env->current_tb = NULL;
if ((next_tb & 3) == 2) {
- /* Instruction counter exired. */
+ /* Instruction counter expired. */
int insns_left;
tb = (TranslationBlock *)(long)(next_tb & ~3);
/* Restore PC. */
diff --git a/exec-all.h b/exec-all.h
index b169370002..4469e95c8c 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -372,7 +372,7 @@ static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
}
-/* Deterministic execution requires that IO only be performaed on the last
+/* Deterministic execution requires that IO only be performed on the last
instruction of a TB so that interrupts take effect immediately. */
static inline int can_do_io(CPUState *env)
{
diff --git a/exec.c b/exec.c
index a664b6fabd..64c87b31e8 100644
--- a/exec.c
+++ b/exec.c
@@ -109,7 +109,7 @@ CPUState *first_cpu;
cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
- 1 = Precice instruction counting.
+ 1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
int use_icount = 0;
/* Current instruction counter. While executing translated code this may
@@ -1080,7 +1080,7 @@ TranslationBlock *tb_alloc(target_ulong pc)
void tb_free(TranslationBlock *tb)
{
- /* In practice this is mostly used for single use temorary TB
+ /* In practice this is mostly used for single use temporary TB
Ignore the hard cases and just back up if this TB happens to
be the last one generated. */
if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
@@ -1394,7 +1394,7 @@ void cpu_interrupt(CPUState *env, int mask)
old_mask = env->interrupt_request;
/* FIXME: This is probably not threadsafe. A different thread could
- be in the mittle of a read-modify-write operation. */
+ be in the middle of a read-modify-write operation. */
env->interrupt_request |= mask;
#if defined(USE_NPTL)
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
@@ -3019,13 +3019,13 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
n = env->icount_decr.u16.low + tb->icount;
cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
/* Calculate how many instructions had been executed before the fault
- occured. */
+ occurred. */
n = n - env->icount_decr.u16.low;
/* Generate a new TB ending on the I/O insn. */
n++;
/* On MIPS and SH, delay slot instructions can only be restarted if
they were already the first instruction in the TB. If this is not
- the first instruction in a TB then re-execute the preceeding
+ the first instruction in a TB then re-execute the preceding
branch. */
#if defined(TARGET_MIPS)
if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
@@ -3053,7 +3053,7 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
/* FIXME: In theory this could raise an exception. In practice
we have already translated the block once so it's probably ok. */
tb_gen_code(env, pc, cs_base, flags, cflags);
- /* TODO: If env->pc != tb->pc (i.e. the failuting instruction was not
+ /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
the first in the TB) then we end up generating a whole new TB and
repeating the fault, which is horribly inefficient.
Better would be to execute just this insn uncached, or generate a
diff --git a/gen-icount.h b/gen-icount.h
index 172b2bc7d7..61545f1577 100644
--- a/gen-icount.h
+++ b/gen-icount.h
@@ -1,4 +1,4 @@
-/* Helpewrs for instruction counting code genration. */
+/* Helpers for instruction counting code generation. */
static TCGArg *icount_arg;
static int icount_label;
diff --git a/target-arm/translate.c b/target-arm/translate.c
index b9b9838fe6..2831432e7d 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -8684,7 +8684,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
/* Translation stops when a conditional branch is enoutered.
* Otherwise the subsequent code could get translated several times.
* Also stop translation when a page boundary is reached. This
- * ensures prefech aborts occur at the right place. */
+ * ensures prefetch aborts occur at the right place. */
num_insns ++;
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
!env->singlestep_enabled &&
diff --git a/target-cris/translate.c b/target-cris/translate.c
index 2fc4101d44..81b61326f5 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -3141,7 +3141,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
num_insns++;
/* Check for delayed branches here. If we do it before
- actually genereating any host code, the simulator will just
+ actually generating any host code, the simulator will just
loop doing nothing on this program location. */
if (dc->delayed_branch) {
dc->delayed_branch--;
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index a920d213b0..e808db5e84 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -2980,7 +2980,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
num_insns++;
/* Terminate the TB on memory ops if watchpoints are present. */
- /* FIXME: This should be replacd by the deterministic execution
+ /* FIXME: This should be replaced by the deterministic execution
* IRQ raising bits. */
if (dc->is_mem && env->nb_watchpoints)
break;
diff --git a/target-mips/translate.c b/target-mips/translate.c
index bd14e32550..c1b38233a8 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -3998,7 +3998,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int se
rn, reg, sel);
}
#endif
- /* For simplicitly assume that all writes can cause interrupts. */
+ /* For simplicity assume that all writes can cause interrupts. */
if (use_icount) {
gen_io_end();
ctx->bstate = BS_STOP;
@@ -5170,7 +5170,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int s
}
#endif
tcg_temp_free(t0);
- /* For simplicitly assume that all writes can cause interrupts. */
+ /* For simplicity assume that all writes can cause interrupts. */
if (use_icount) {
gen_io_end();
ctx->bstate = BS_STOP;
diff --git a/vl.c b/vl.c
index 00d43eeeee..d6da115eb9 100644
--- a/vl.c
+++ b/vl.c
@@ -239,9 +239,9 @@ struct drive_opt {
static CPUState *cur_cpu;
static CPUState *next_cpu;
static int event_pending = 1;
-/* Conversion factor from emulated instrctions to virtual clock ticks. */
+/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
-/* Arbitrarily pick 1MIPS as the minimum alowable speed. */
+/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
@@ -903,7 +903,7 @@ static void rtc_stop_timer(struct qemu_alarm_timer *t);
#endif /* _WIN32 */
/* Correlation between real and virtual time is always going to be
- farly approximate, so ignore small variation.
+ fairly approximate, so ignore small variation.
When the guest is idle real and virtual time will be aligned in
the IO wait loop. */
#define ICOUNT_WOBBLE (QEMU_TIMER_BASE / 10)
@@ -7262,7 +7262,7 @@ static int main_loop(void)
if (use_icount == 1) {
/* When not using an adaptive execution frequency
we tend to get badly out of sync with real time,
- so just delay for a resonable amount of time. */
+ so just delay for a reasonable amount of time. */
delta = 0;
} else {
delta = cpu_get_icount() - cpu_get_clock();