author     Paolo Bonzini <pbonzini@redhat.com>    2011-03-11 16:47:48 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>    2011-03-21 09:23:23 +0100
commit     74475455442398a64355428b37422d14ccc293cb (patch)
tree       2cd6fea3fef5aeca9c2a73ea568ed49fd2b51de1 /hw/cuda.c
parent     7bd427d801e1e3293a634d3c83beadaa90ffb911 (diff)
change all other clock references to use nanosecond resolution accessors
This was done with:

    sed -i 's/qemu_get_clock\>/qemu_get_clock_ns/' \
        $(git grep -l 'qemu_get_clock\>' )
    sed -i 's/qemu_new_timer\>/qemu_new_timer_ns/' \
        $(git grep -l 'qemu_new_timer\>' )

after checking that get_clock and new_timer never occur twice
on the same line.  There were no missed occurrences; however, even
if there had been, they would have been caught by the compiler.

There was exactly one false positive in qemu_run_timers:

    -    current_time = qemu_get_clock (clock);
    +    current_time = qemu_get_clock_ns (clock);

which is of course not in this patch.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
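A note on the per-line check mentioned above: without the g flag, each sed substitution rewrites only the first match on a line, so a line containing two occurrences would be converted only partially. A minimal sketch of how such lines could be found before running the blanket rewrite (hypothetical commands, not necessarily what the author ran); an empty result from each grep means no line needs manual attention:

    # Hypothetical check: report any line that matches the pattern twice.
    git grep -n 'qemu_get_clock\>.*qemu_get_clock\>'
    git grep -n 'qemu_new_timer\>.*qemu_new_timer\>'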
Diffstat (limited to 'hw/cuda.c')
-rw-r--r--  hw/cuda.c | 24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/hw/cuda.c b/hw/cuda.c
index e4c178dd2a..37aa3f47fc 100644
--- a/hw/cuda.c
+++ b/hw/cuda.c
@@ -170,7 +170,7 @@ static unsigned int get_counter(CUDATimer *s)
     int64_t d;
     unsigned int counter;
 
-    d = muldiv64(qemu_get_clock(vm_clock) - s->load_time,
+    d = muldiv64(qemu_get_clock_ns(vm_clock) - s->load_time,
                  CUDA_TIMER_FREQ, get_ticks_per_sec());
     if (s->index == 0) {
         /* the timer goes down from latch to -1 (period of latch + 2) */
@@ -189,7 +189,7 @@ static unsigned int get_counter(CUDATimer *s)
 static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
 {
     CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
-    ti->load_time = qemu_get_clock(vm_clock);
+    ti->load_time = qemu_get_clock_ns(vm_clock);
     ti->counter_value = val;
     cuda_timer_update(s, ti, ti->load_time);
 }
@@ -346,7 +346,7 @@ static void cuda_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
         break;
     case 4:
         s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
-        cuda_timer_update(s, &s->timers[0], qemu_get_clock(vm_clock));
+        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
         break;
     case 5:
         s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
@@ -355,12 +355,12 @@ static void cuda_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
         break;
     case 6:
         s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
-        cuda_timer_update(s, &s->timers[0], qemu_get_clock(vm_clock));
+        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
         break;
     case 7:
         s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
         s->ifr &= ~T1_INT;
-        cuda_timer_update(s, &s->timers[0], qemu_get_clock(vm_clock));
+        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
         break;
     case 8:
         s->timers[1].latch = val;
@@ -374,7 +374,7 @@ static void cuda_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
         break;
     case 11:
         s->acr = val;
-        cuda_timer_update(s, &s->timers[0], qemu_get_clock(vm_clock));
+        cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
         cuda_update(s);
         break;
     case 12:
@@ -506,7 +506,7 @@ static void cuda_adb_poll(void *opaque)
         cuda_send_packet_to_host(s, obuf, olen + 2);
     }
     qemu_mod_timer(s->adb_poll_timer,
-                   qemu_get_clock(vm_clock) +
+                   qemu_get_clock_ns(vm_clock) +
                    (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
 }
@@ -524,7 +524,7 @@ static void cuda_receive_packet(CUDAState *s,
             s->autopoll = autopoll;
             if (autopoll) {
                 qemu_mod_timer(s->adb_poll_timer,
-                               qemu_get_clock(vm_clock) +
+                               qemu_get_clock_ns(vm_clock) +
                                (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
             } else {
                 qemu_del_timer(s->adb_poll_timer);
@@ -536,14 +536,14 @@ static void cuda_receive_packet(CUDAState *s,
         break;
     case CUDA_SET_TIME:
         ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
-        s->tick_offset = ti - (qemu_get_clock(vm_clock) / get_ticks_per_sec());
+        s->tick_offset = ti - (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
         obuf[0] = CUDA_PACKET;
         obuf[1] = 0;
         obuf[2] = 0;
         cuda_send_packet_to_host(s, obuf, 3);
         break;
     case CUDA_GET_TIME:
-        ti = s->tick_offset + (qemu_get_clock(vm_clock) / get_ticks_per_sec());
+        ti = s->tick_offset + (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
         obuf[0] = CUDA_PACKET;
         obuf[1] = 0;
         obuf[2] = 0;
@@ -754,14 +754,14 @@ void cuda_init (int *cuda_mem_index, qemu_irq irq)
     s->irq = irq;
     s->timers[0].index = 0;
-    s->timers[0].timer = qemu_new_timer(vm_clock, cuda_timer1, s);
+    s->timers[0].timer = qemu_new_timer_ns(vm_clock, cuda_timer1, s);
     s->timers[1].index = 1;
     qemu_get_timedate(&tm, 0);
     s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
-    s->adb_poll_timer = qemu_new_timer(vm_clock, cuda_adb_poll, s);
+    s->adb_poll_timer = qemu_new_timer_ns(vm_clock, cuda_adb_poll, s);
     *cuda_mem_index = cpu_register_io_memory(cuda_read, cuda_write, s,
                                              DEVICE_NATIVE_ENDIAN);
     register_savevm(NULL, "cuda", -1, 1, cuda_save, cuda_load, s);