#!/usr/bin/python2
import sys
import multiprocessing
class Kendo(object):
"""Arbitrator through which all lock requests must go through.
Members:
manager - multiprocessing Manager
num_locks - number of available locks
clocks - deterministic logical times for each process
        lrlt_list - last release logical time for each lock
lock_held_list - list of which locks are held/free
init_shared_mem - initial shared memory state
shared_mem - shared memory map
priorities - thread 'priorities' for acquiring locks
"""
    def __init__(self, max_processes, num_locks, priorities=None,
                 debug=False, init_shared_mem=None):
"""Initialize a Kendo arbitrator.
Args:
max_processes - the maximum possible number of processes that will run
num_locks - number of locks available to all processes
            priorities - iterable of thread priorities
            debug - whether or not to be verbose
init_shared_mem - initial shared memory state
"""
# Create a global mutex for bookkeeping and dumping debug messages
self.global_lock = multiprocessing.Lock()
self.debug = debug
self.num_locks = num_locks
self.max_processes = max_processes
        # Avoid a shared mutable default for the initial shared-memory state
        self.init_shared_mem = init_shared_mem if init_shared_mem is not None else {}
self.processes = []
# Initialize priorities. By default, every thread has the same
# priority
if priorities is None:
priorities = [1 for i in xrange(max_processes)]
self.priorities = priorities
# Initialize all locks that could be used
self.manager = multiprocessing.Manager()
self.locks = [self.manager.Lock() for i in xrange(num_locks)]
# Initialize shared memory
self.shared_mem = self.manager.dict(init_shared_mem)
# Initialize deterministic logical clocks
self.clocks = self.manager.list([0] * max_processes)
# Initialize lock release times
self.lrlt_list = self.manager.list([0] * num_locks)
# ...and lock statuses
self.lock_held_list = self.manager.list([False] * num_locks)
def reset(self):
"""Reset the arbitrator for another run()"""
self.shared_mem = self.manager.dict(self.init_shared_mem)
self.clocks = self.manager.list([0] * len(self.clocks))
self.lrlt_list = self.manager.list([0] * self.num_locks)
self.lock_held_list = self.manager.list([False] * self.num_locks)
def det_mutex_lock(self, pid, lock_number):
"""Attempt to acquire a mutex
Args:
pid - ID/index of process
lock_number - index of lock the process wants
"""
while True:
self.wait_for_turn(pid)
if self.debug:
                self._log("Process", pid, "'s Turn with Lock", lock_number,
                          "CLOCKS", self.clocks,
                          "LAST RELEASE TIME", self.lrlt_list)
            # Atomically test whether the lock is free and claim it while
            # holding the global bookkeeping mutex
self.global_lock.acquire()
if self.try_lock(lock_number):
self.global_lock.release()
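                # Kendo rule: the process may hold the lock only if its logical
                # clock is strictly greater than the lock's last release time;
                # otherwise give the lock back and retry on a later turn.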
if self.lrlt_list[lock_number] >= self.clocks[pid]:
# Atomically release and label lock as not held
                    # XXX edge case: at start-up, when lrlt_list is all zeros,
                    # every process must wait at least one turn before it can
                    # hold a lock
self.global_lock.acquire()
self.locks[lock_number].release()
self.lock_held_list[lock_number] = False
self.global_lock.release()
else:
if self.debug:
                        self._log("Process", pid, "Locking Lock", lock_number,
                                  "CLOCKS", self.clocks)
break
else:
self.global_lock.release()
# Increment the process's logical time while it's spinning
self.clocks[pid] += 1
# Increment the process's logical time after acquisition
self.clocks[pid] += self.priorities[pid]
# Log
self._log("ORDER_LOG: ", "pid = ", pid, " acquired lock ", lock_number)
def det_mutex_unlock(self, pid, lock_number):
"""Deterministically unlock a mutex.
Args:
pid - ID/index of the calling process
lock_number - index of the lock to unlock
"""
# Atomically release and label lock as not held
self.global_lock.acquire()
self.lock_held_list[lock_number] = False
self.lrlt_list[lock_number] = self.clocks[pid]
self.locks[lock_number].release()
self.clocks[pid] += 1
if self.debug:
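            # Use bare print here: _log() would try to re-acquire the
            # non-reentrant global lock that this method already holds.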
print "Process", pid, "Unlocking Lock", lock_number
print "CLOCKS", self.clocks
print "LAST RELEASE TIME", self.lrlt_list[lock_number]
print '\n'
self.global_lock.release()
# Log
self._log("ORDER_LOG: ", "pid = ", pid, " released lock ", lock_number)
def try_lock(self, lock_number):
"""Try to obtain a lock.
Args:
lock_number - index of the desired lock
Returns True if lock is free, False if acquisition failed
"""
# Check if the lock is free
if not self.lock_held_list[lock_number]:
self.lock_held_list[lock_number] = True
self.locks[lock_number].acquire()
return True
return False
def wait_for_turn(self, pid):
"""Wait until the given process can proceed
Args:
pid - ID/index of the process
"""
# Get the process's logical time
process_clock_value = self.clocks[pid]
        # A process may proceed only when its clock equals the global minimum;
        # ties are broken in favor of the lowest pid, since list.index()
        # returns the first occurrence of the minimum.
        while True:
            if (process_clock_value == min(self.clocks)
                    and pid == self.clocks.index(min(self.clocks))):
                break
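    # Unimplemented placeholders for explicit logical-clock control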
def pause_logical_clock(self):
pass
def resume_logical_clock(self):
pass
def increment_logical_clock(self):
pass
def run(self):
"""Run all processes"""
if self.debug:
self._log("Starting to run all processes...")
        # Launch each registered process in its own OS process
        procs = []
        for p in self.processes:
            proc = multiprocessing.Process(target=p.run)
            procs.append(proc)
            proc.start()
        for proc in procs:
            proc.join()
if self.debug:
self._log("Done!")
def register_process(self, process):
"""Register a process to be run with this arbitrator
Args:
process - the process to be run
Returns the PID of the process, None if something went awry.
"""
        if len(self.processes) < self.max_processes:
            self.processes.append(process)
            return len(self.processes) - 1
        return None
def mutate_shared(self, name, value):
"""Add/mutate a shared object to simulate shared memory.
Args:
name - name of the value
value - value
"""
self.shared_mem[name] = value
def _log(self, *args):
"""Output for debugging"""
self.global_lock.acquire()
for a in args:
sys.stdout.write(str(a))
sys.stdout.write(' ')
sys.stdout.write('\n')
self.global_lock.release()
if __name__ == "__main__":
pass
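    # ------------------------------------------------------------------
    # Minimal usage sketch (an illustration, not part of the original
    # module): two worker processes register with the arbitrator, each
    # deterministically takes its own lock, records a value in shared
    # memory, and releases. The DemoWorker class and the 'done_<pid>'
    # keys are assumptions made for this example; it also assumes a POSIX
    # platform where multiprocessing forks the child processes.
    class DemoWorker(object):
        """Toy registered process: lock, write shared memory, unlock."""
        def __init__(self, arbitrator, lock_number):
            self.arbitrator = arbitrator
            self.lock_number = lock_number
            self.pid = None  # assigned after register_process()

        def run(self):
            self.arbitrator.det_mutex_lock(self.pid, self.lock_number)
            self.arbitrator.mutate_shared('done_%d' % self.pid, True)
            self.arbitrator.det_mutex_unlock(self.pid, self.lock_number)

    kendo = Kendo(max_processes=2, num_locks=2, debug=True)
    for lock_number in xrange(2):
        worker = DemoWorker(kendo, lock_number)
        worker.pid = kendo.register_process(worker)
    kendo.run()
    print "Shared memory after run:", dict(kendo.shared_mem.items())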