-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy paththread_spin_lock.c
140 lines (103 loc) · 3.2 KB
/
thread_spin_lock.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
#include "types.h"
#include "stat.h"
#include "user.h"
#include "x86.h"
// Mutual-exclusion spin lock for user-level threads, modeled on
// xv6's kernel struct spinlock. locked is the only field the
// lock/unlock fast path depends on; the rest is debug bookkeeping.
struct thread_spinlock {
uint locked; // Is the lock held?
// For debugging:
char *name; // Name of lock.
struct cpu *cpu; // The cpu holding the lock.
uint pcs[10]; // The call stack (an array of program counters)
// that locked the lock.
};
// Single global lock guarding total_balance. Usable before any
// explicit init only because C zero-initializes globals (locked == 0
// means "free") — NOTE(review): main never calls thread_spin_init.
struct thread_spinlock lock;
// Per-thread work order handed to do_work() via thread_create.
struct balance {
char name[32]; // label printed in log messages
int amount; // number of increments this thread performs
};
// Shared counter incremented by both workers. volatile forces each
// access to touch memory; actual mutual exclusion comes from `lock`.
volatile int total_balance = 0;
// Busy-wait for d iterations and return the number of iterations
// performed (always == d). The inline "nop" keeps the optimizer
// from deleting the otherwise-empty loop.
volatile unsigned int delay (unsigned int d) {
    unsigned int n = 0;
    while (n < d) {
        __asm volatile( "nop" ::: );
        n++;
    }
    return n;
}
// Prepare lk for first use: mark it unlocked, clear the owner
// field, and record its debug name.
void
thread_spin_init(struct thread_spinlock *lk, char *name)
{
    lk->locked = 0;
    lk->cpu = 0;
    lk->name = name;
}
// Acquire the lock.
// Spins until the lock is acquired; holding a lock for a long time
// makes other threads waste cycles spinning on it.
void
thread_spin_lock(struct thread_spinlock *lk)
{
    // Atomically swap 1 into lk->locked. A result of 0 means the
    // lock was free and is now ours; anything else means another
    // thread holds it, so keep trying.
    for (;;) {
        if (xchg(&lk->locked, 1) == 0)
            break;
    }
    // Full barrier: forbid the compiler and CPU from moving the
    // critical section's loads/stores above the acquire, so they
    // are guaranteed to happen while the lock is held.
    __sync_synchronize();
}
// Release the lock.
void
thread_spin_unlock(struct thread_spinlock *lk)
{
    // Clear the debug bookkeeping associated with the acquisition.
    lk->cpu = 0;
    lk->pcs[0] = 0;
    // Full barrier: all stores made inside the critical section must
    // be visible to other cores before the lock appears free. Both
    // the compiler and the hardware may otherwise reorder them past
    // the releasing store below.
    __sync_synchronize();
    // Atomic release store, equivalent to lk->locked = 0. A plain C
    // assignment is not guaranteed atomic here; a real OS would use
    // C atomics instead of inline x86 assembly.
    asm volatile("movl $0, %0" : "+m" (lk->locked) : );
}
// Worker entry point. arg points at a struct balance describing the
// work: perform b->amount lock-protected increments of the shared
// total_balance counter, then exit the thread.
void do_work(void *arg){
    int i;
    int old;
    struct balance *b = (struct balance*) arg;
    printf(1, "Starting do_work: s:%s\n", b->name);
    for (i = 0; i < b->amount; i++) {
        thread_spin_lock(&lock);
        // Deliberately racy read-modify-write: the delay() widens the
        // window in which another thread could interleave, which is
        // exactly what the spin lock must prevent.
        old = total_balance;
        delay(100000);
        total_balance = old + 1;
        thread_spin_unlock(&lock);
    }
    // FIX: the original printed the name with %x, dumping the pointer
    // bits instead of the string; %s matches the startup message above.
    printf(1, "Done s:%s\n", b->name);
    thread_exit();
    return;
}
int main(int argc, char *argv[]) {
struct balance b1 = {"b1", 3200};
struct balance b2 = {"b2", 2800};
void *s1, *s2;
int t1, t2, r1, r2;
s1 = malloc(4096);
s2 = malloc(4096);
t1 = thread_create(do_work, (void*)&b1, s1);
t2 = thread_create(do_work, (void*)&b2, s2);
r1 = thread_join();
r2 = thread_join();
printf(1, "Threads finished: (%d):%d, (%d):%d, shared balance:%d\n",
t1, r1, t2, r2, total_balance);
exit();
}