/*
 * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <pthread.h>

/* Set by the check32/check64 helpers in hex_test.h when a comparison fails */
int err;

#include "hex_test.h"

static inline int32_t atomic_inc32(int32_t *x)
{
    int32_t old, dummy;
    __asm__ __volatile__(
        "1: %0 = memw_locked(%2)\n\t"
        "   %1 = add(%0, #1)\n\t"
        "   memw_locked(%2, p0) = %1\n\t"
        "   if (!p0) jump 1b\n\t"
        : "=&r"(old), "=&r"(dummy)
        : "r"(x)
        : "p0", "memory");
    return old;
}

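/*
 * 64-bit variants work on register pairs via memd_locked; the +/-1 constant
 * is first materialized in the scratch pair and then added to the loaded
 * value.
 */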
static inline int64_t atomic_inc64(int64_t *x)
{
    int64_t old, dummy;
    __asm__ __volatile__(
        "1: %0 = memd_locked(%2)\n\t"
        "   %1 = #1\n\t"
        "   %1 = add(%0, %1)\n\t"
        "   memd_locked(%2, p0) = %1\n\t"
        "   if (!p0) jump 1b\n\t"
        : "=&r"(old), "=&r"(dummy)
        : "r"(x)
        : "p0", "memory");
    return old;
}

static inline int32_t atomic_dec32(int32_t *x)
{
    int32_t old, dummy;
    __asm__ __volatile__(
        "1: %0 = memw_locked(%2)\n\t"
        "   %1 = add(%0, #-1)\n\t"
        "   memw_locked(%2, p0) = %1\n\t"
        "   if (!p0) jump 1b\n\t"
        : "=&r"(old), "=&r"(dummy)
        : "r"(x)
        : "p0", "memory");
    return old;
}

static inline int64_t atomic_dec64(int64_t *x)
{
    int64_t old, dummy;
    __asm__ __volatile__(
        "1: %0 = memd_locked(%2)\n\t"
        "   %1 = #-1\n\t"
        "   %1 = add(%0, %1)\n\t"
        "   memd_locked(%2, p0) = %1\n\t"
        "   if (!p0) jump 1b\n\t"
        : "=&r"(old), "=&r"(dummy)
        : "r"(x)
        : "p0", "memory");
    return old;
}

#define LOOP_CNT 1000
volatile int32_t tick32 = 1; /* Using volatile because we are testing atomics */
volatile int64_t tick64 = 1; /* Using volatile because we are testing atomics */
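
/*
 * Each thread performs LOOP_CNT increments on one counter and LOOP_CNT
 * decrements on the other, so both counters must end up back at their
 * initial value of 1.
 */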
void *thread1_func(void *arg)
{
    for (int i = 0; i < LOOP_CNT; i++) {
        atomic_inc32(&tick32);
        atomic_dec64(&tick64);
    }
    return NULL;
}

void *thread2_func(void *arg)
{
    for (int i = 0; i < LOOP_CNT; i++) {
        atomic_dec32(&tick32);
        atomic_inc64(&tick64);
    }
    return NULL;
}

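/*
 * Run both threads to completion, then check that the concurrent updates
 * balanced out.
 */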
void test_pthread(void)
{
    pthread_t tid1, tid2;

    pthread_create(&tid1, NULL, thread1_func, "hello1");
    pthread_create(&tid2, NULL, thread2_func, "hello2");
    pthread_join(tid1, NULL);
    pthread_join(tid2, NULL);

    check32(tick32, 1);
    check64(tick64, 1);
}

int main(int argc, char **argv)
{
    test_pthread();
    puts(err ? "FAIL" : "PASS");
    return err;
}