author     aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-03-28 22:29:15 +0000
committer  aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-03-28 22:29:15 +0000
commit     15a5115690558ad65de02d9b9bb4ec89bc4cf8ac (patch)
tree       4407ce082f2a29ecb358cab23dd5b0f120f92864
parent     f9e7bcfe6b8d32431374c86542a1d671681a7db1 (diff)
Use spinlock_t for interrupt_lock, lock support for HPPA (Stuart Brady)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4118 c046a42c-6fe2-441c-8c8c-71466251a162
-rw-r--r--  exec-all.h  57
-rw-r--r--  exec.c       4
2 files changed, 54 insertions(+), 7 deletions(-)
diff --git a/exec-all.h b/exec-all.h
index a35b7d6c2e..62b8191913 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -297,6 +297,30 @@ extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
+#if defined(__hppa__)
+
+typedef int spinlock_t[4];
+
+#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }
+
+static inline void resetlock (spinlock_t *p)
+{
+ (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
+}
+
+#else
+
+typedef int spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED 0
+
+static inline void resetlock (spinlock_t *p)
+{
+ *p = SPIN_LOCK_UNLOCKED;
+}
+
+#endif
+
#if defined(__powerpc__)
static inline int testandset (int *p)
{
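The two definitions above exist because PA-RISC's only atomic primitive, ldcw, clears the addressed word and returns its previous contents: a free lock must therefore hold a non-zero value, which is why SPIN_LOCK_UNLOCKED is four 1s on HPPA and plain 0 everywhere else. A minimal sketch of declaring and releasing such a lock, assuming only the definitions added above (demo_lock and demo_release are hypothetical names, not part of the patch):

    static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;

    static void demo_release(void)
    {
        /* On HPPA this stores 1 into all four ints of the array;
           on every other host it stores the single int 0. */
        resetlock(&demo_lock);
    }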
@@ -396,6 +420,33 @@ static inline int testandset (int *p)
: "cc","memory");
return ret;
}
+#elif defined(__hppa__)
+
+/* Because malloc only guarantees 8-byte alignment for malloc'd data,
+   and GCC only guarantees 8-byte alignment for stack locals, we can't
+   be assured of 16-byte alignment for atomic lock data even if we
+   specify "__attribute__((aligned(16)))" in the type declaration.  So,
+   we use an array of four ints for the atomic lock type and dynamically
+   select the 16-byte aligned int from the array for the semaphore.  */
+#define __PA_LDCW_ALIGNMENT 16
+static inline void *ldcw_align (void *p) {
+ unsigned long a = (unsigned long)p;
+ a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
+ return (void *)a;
+}
+
+static inline int testandset (spinlock_t *p)
+{
+ unsigned int ret;
+ p = ldcw_align(p);
+ __asm__ __volatile__("ldcw 0(%1),%0"
+ : "=r" (ret)
+ : "r" (p)
+ : "memory" );
+ return !ret;
+}
+
#elif defined(__ia64)
#include <ia64intrin.h>
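Two details in this hunk are worth spelling out. First, ldcw_align() is the standard round-up-to-a-multiple trick: adding __PA_LDCW_ALIGNMENT - 1 and masking off the low bits yields the next 16-byte boundary at or above p, which always lands inside the four-int array as long as the array itself is at least word aligned. Second, ldcw returns the word's old contents, which are non-zero only when the lock was free, so "return !ret" preserves the convention used by every other port in this file: testandset() returns 0 when the lock was acquired. A compilable self-check, assuming the patch's definitions (check_align is illustrative only, not in the patch):

    #include <assert.h>

    static void check_align(void)
    {
        int words[4];                          /* same shape as the HPPA spinlock_t */
        int *w = ldcw_align(words);
        assert(((unsigned long)w & 15) == 0);  /* 16-byte aligned */
        assert(w >= words && w <= words + 3);  /* still inside the array */
    }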
@@ -428,10 +479,6 @@ static inline int testandset (int *p)
#error unimplemented CPU support
#endif
-typedef int spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED 0
-
#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
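The hunk above only deletes the old generic spinlock_t definition; the spin_lock() body between this hunk and the next is unchanged and therefore elided from the diff. In QEMU of this period it is, in essence, a busy-wait on testandset(), likely along these lines (a paraphrase, since the exact elided text is not shown here):

    static inline void spin_lock(spinlock_t *lock)
    {
        while (testandset(lock));   /* spin until the old value was "free" */
    }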
@@ -440,7 +487,7 @@ static inline void spin_lock(spinlock_t *lock)
static inline void spin_unlock(spinlock_t *lock)
{
- *lock = 0;
+ resetlock(lock);
}
static inline int spin_trylock(spinlock_t *lock)
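Similarly, spin_trylock(), whose signature closes this hunk, would plausibly be a single non-blocking attempt built on the same primitive (a sketch only; its body is also elided from this diff):

    static inline int spin_trylock(spinlock_t *lock)
    {
        return !testandset(lock);   /* non-zero on success */
    }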
diff --git a/exec.c b/exec.c
index b74d90920c..48dabd67ad 100644
--- a/exec.c
+++ b/exec.c
@@ -1215,7 +1215,7 @@ void cpu_set_log_filename(const char *filename)
void cpu_interrupt(CPUState *env, int mask)
{
TranslationBlock *tb;
- static int interrupt_lock;
+ static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
env->interrupt_request |= mask;
/* if the cpu is currently executing code, we must unlink it and
@@ -1224,7 +1224,7 @@ void cpu_interrupt(CPUState *env, int mask)
if (tb && !testandset(&interrupt_lock)) {
env->current_tb = NULL;
tb_reset_jump_recursive(tb);
- interrupt_lock = 0;
+ resetlock(&interrupt_lock);
}
}
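The exec.c side shows the idiom these primitives are built for: testandset() doubles as a non-blocking acquire, so if another context already holds interrupt_lock, the current one simply skips the unlink work instead of spinning. Reduced to its shape (names as in the patch):

    if (!testandset(&interrupt_lock)) {   /* acquired: lock was free */
        /* ... unlink the currently executing TB ... */
        resetlock(&interrupt_lock);       /* release for other contexts */
    }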