-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathrun_DetectDataset.py
334 lines (225 loc) · 8.64 KB
/
run_DetectDataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageGrab
import time
import win32api, win32con
import json
import pytesseract
# -------------------------------------------------------
# _____ _ _ _ _ ____ _____ ___ ___ _ _ ____
# | ___| | | | \ | |/ ___|_ _|_ _/ _ \| \ | / ___|
# | |_ | | | | \| | | | | | | | | | \| \___ \
# | _| | |_| | |\ | |___ | | | | |_| | |\ |___) |
# |_| \___/|_| \_|\____| |_| |___\___/|_| \_|____/
#
# -------------------------------------------------------
def detectCircle(image, name, verbose):
    """Detect circles in a single-channel mask image via the Hough transform.

    Each detected circle's outline and center are drawn onto ``image`` in
    place (useful when debugging with ``verbose``).

    Args:
        image: single-channel image (a color mask) fed to cv2.HoughCircles.
        name: window title used when the verbose display is enabled.
        verbose: if 1, show the annotated image in an OpenCV window.

    Returns:
        ``[]`` when no circle is found, otherwise a ``uint16`` array of shape
        (1, n, 3) holding (x, y, radius) per circle.  Callers distinguish the
        two cases with ``type(result) is not list`` — keep that contract.
    """
    all_circs = cv2.HoughCircles(image, cv2.HOUGH_GRADIENT, 1.2, 30,
                                 param1=60, param2=40, minRadius=0, maxRadius=1000)
    if all_circs is None:
        # Historical "no detection" sentinel expected by the callers.
        all_circs_rounded = []
    else:
        all_circs_rounded = np.uint16(np.around(all_circs))
        for x, y, r in all_circs_rounded[0, :]:
            cv2.circle(image, (x, y), r, (50, 200, 200), 5)  # circle outline
            cv2.circle(image, (x, y), 2, (255, 0, 0), 3)     # center dot
    if verbose == 1:
        cv2.imshow(name, image)
    return all_circs_rounded
def closestHold(y, x, holds_pos):
    """Map a pixel coordinate to the nearest MoonBoard grid hold name.

    Args:
        y, x: pixel coordinates of a detected circle center.
        holds_pos: (18, 11, 2) array where ``[row, col, 0]`` is the row's
            y pixel and ``[row, col, 1]`` is the column's x pixel.

    Returns:
        ``[row_label, column_letter]`` — e.g. ``["18", "A"]`` for the
        top-left hold (rows count down from 18, columns A..K left to right).
    """
    lett_str = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]
    num_str = ["18", "17", "16", "15", "14", "13", "12", "11", "10",
               "9", "8", "7", "6", "5", "4", "3", "2", "1"]
    # np.argmin is always defined (first index on ties, like the original
    # strict-< scan).  The previous threshold-based loop left the index
    # unbound — a NameError — whenever every distance exceeded 1000 px.
    closest_row = int(np.argmin(np.abs(y - holds_pos[:, 0, 0])))
    closest_col = int(np.argmin(np.abs(x - holds_pos[0, :, 1])))
    return [num_str[closest_row], lett_str[closest_col]]
def nextProblem(tl_x, tl_y, br_x, br_y, SCREEN_WIDTH, SCREEN_HEIGHT):
    """Advance the emulator to the next problem with a small left swipe.

    Moves the cursor to the right side of the app window, presses the left
    button, drags 50 px to the left while held, and releases.

    Args:
        tl_x, tl_y, br_x, br_y: screenshot region corners, in screen pixels.
        SCREEN_WIDTH, SCREEN_HEIGHT: physical screen resolution, used to
            convert pixels into the 0..65535 absolute mouse coordinate space.
    """
    click_x = int(tl_x + (br_x - tl_x) * 0.88)
    click_y = int((tl_y + br_y) / 2)

    def _move_abs(px, py):
        # Absolute mouse positions are expressed in 0..65535 device units.
        win32api.mouse_event(
            win32con.MOUSEEVENTF_MOVE | win32con.MOUSEEVENTF_ABSOLUTE,
            int(px / SCREEN_WIDTH * 65535.0),
            int(py / SCREEN_HEIGHT * 65535.0))

    # Hover over the right side of the app window.
    _move_abs(click_x, click_y)
    # Press, drag slightly left while held, then release: a swipe gesture.
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, click_x, click_y, 0, 0)
    _move_abs(click_x - 50, click_y)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, click_x - 50, click_y, 0, 0)
def writeJSONfile(n, r_hold, g_hold, b_hold, bench_check, text, json_decoded):
    """Store one problem's data under key ``str(n)`` in ``json_decoded``.

    Args:
        n: running index of the problem (used as the dict key).
        r_hold: list of [row, col] red holds (TOP holds).
        g_hold: list of [row, col] green holds (START holds).
        b_hold: list of [row, col] blue holds (MIDDLE holds).
        bench_check: True when the problem is a benchmark.
        text: [name, grade] as returned by detectText.
        json_decoded: dataset dict, mutated in place.
    """
    problem_dict = {
        "Name": text[0],
        "Grade": text[1],
        "IsBenchmark": bench_check,
        "Moves": [],
        # Key spelling kept as-is ("Sended"): it is the existing dataset schema.
        "Sended": False,
    }
    # One data-driven loop replaces three copy-pasted ones; append order is
    # preserved: red (top), blue (middle), green (start).
    for holds, is_start, is_end in ((r_hold, False, True),
                                    (b_hold, False, False),
                                    (g_hold, True, False)):
        for hold in holds:
            problem_dict["Moves"].append({
                "Description": str(hold[0]) + str(hold[1]),
                "IsStart": is_start,
                "IsEnd": is_end,
            })
    json_decoded[str(n)] = problem_dict
def checkBenchmark(in_image, rx, lx):
    """Decide whether the current problem is a benchmark.

    Looks for two circles in the bottom strip of the screenshot; if their
    centers are horizontally close (relative to the capture width) the
    layout matches the benchmark badge.

    Args:
        in_image: BGR crop of the bottom strip of the screenshot.
        rx, lx: right and left x bounds of the capture region (pixels).

    Returns:
        True when the problem looks like a benchmark, False otherwise
        (including when fewer than two circles are detected).
    """
    image = cv2.cvtColor(in_image, cv2.COLOR_BGR2GRAY)
    all_circs = cv2.HoughCircles(image, cv2.HOUGH_GRADIENT, 0.5, 50,
                                 param1=10, param2=18, minRadius=0, maxRadius=1000)
    # Guard: the original crashed (TypeError/IndexError) when HoughCircles
    # returned None or found fewer than two circles.
    if all_circs is None or all_circs.shape[1] < 2:
        return False
    rounded = np.uint16(np.around(all_circs))
    # Cast to int before subtracting: uint16 arithmetic wraps around when the
    # second center lies right of the first, which corrupted the distance.
    distance = abs(int(rounded[0, 0][0]) - int(rounded[0, 1][0]))
    return distance / (rx - lx) <= 0.5
def detectText(image):
    """OCR the problem name and grade from the screenshot header band.

    Crops the header strip (fixed fractions of the capture region, read from
    the module-level ``ss_region_*`` globals), inverts it to improve OCR
    contrast, and splits the first OCR line on ','.

    Returns:
        A list whose first element is the problem name; the grade slot is
        ``'NA'`` when no comma was found in the OCR output.
    """
    width = ss_region_br_x - ss_region_tl_x
    height = ss_region_br_y - ss_region_tl_y
    # Header band holding "<name>, <grade>".
    crop = image[int(0.058 * height):int(0.080 * height),
                 int(0.17 * width):int(0.88 * width)]
    inverted = cv2.bitwise_not(crop)
    first_line = pytesseract.image_to_string(inverted).split('\n')[0]
    content = first_line.split(',')
    if len(content) < 2:
        content.append('NA')
    return content
# --------------------------------
# __ __ _ ___ _ _
# | \/ | / \ |_ _| \ | |
# | |\/| | / _ \ | || \| |
# | | | |/ ___ \ | || |\ |
# |_| |_/_/ \_\___|_| \_|
#
# --------------------------------
# THINGS TO MODIFY FOR MAKING IT WORK ON ANY COMPUTER
# ****************************************************************************************************
# Path of the JSON dataset file: it is loaded below and rewritten at the end of the run.
json_path = r"YOUR PATH/MoonBoard_Dataset/sample.json"
# These values define the pixel region where the code will take a screenshot. Inside this area should
# be contained the emulator screen with the moonboard app opened on the first problem
ss_region_tl_x = 1752  # screenshot region, top-left x coordinate (pixels)
ss_region_tl_y = 0  # screenshot region, top-left y coordinate (pixels)
ss_region_br_x = 2518  # screenshot region, bottom-right x coordinate (pixels)
ss_region_br_y = 1400  # screenshot region, bottom-right y coordinate (pixels)
tot_problems = 1248  # Number of problems that you want to write in the dataset
# Height and width of your screen
SCREEN_WIDTH = 2560
SCREEN_HEIGHT = 1440
# Pixel layout of the real hold grid: offsets of the first hold and spacing between
# holds. These need fine tuning depending on your screen/emulator window.
yoff = 165
xoff = 84
x_sep = 43
y_sep = 43
# ****************************************************************************************************
# Load the existing dataset so newly scraped problems are added to it.
with open(json_path) as json_file:
    json_decoded = json.load(json_file)
# Cycle over all the problems available in the app
for n in range(0, tot_problems):
    # Capture and save screen image
    ss_region = (ss_region_tl_x,ss_region_tl_y, ss_region_br_x, ss_region_br_y)
    ss_img = ImageGrab.grab(ss_region)
    image = np.array(ss_img)
    # PIL delivers RGB; reversing the channel axis yields BGR as OpenCV expects.
    img = image[:, :, ::-1]
    text = detectText(img)
    # Benchmark check: only the bottom strip (below y=955) contains the badge
    bench_check_img = np.copy(img[955:, :])
    bench_check = checkBenchmark(bench_check_img, ss_region_br_x, ss_region_tl_x)
    # Extract the circles (G,R,B) with near-pure-color masks (BGR channel order)
    low_r = np.array([0, 0, 210])  # minimum color red accepted in the mask
    high_r = np.array([30, 30, 255])  # maximum color red accepted in the mask
    maskR = cv2.inRange(img, low_r, high_r)
    low_b = np.array([210, 0, 0])
    high_b = np.array([255, 30, 30])
    maskB = cv2.inRange(img, low_b, high_b)
    low_g = np.array([0, 210, 0])
    high_g = np.array([30, 255, 30])
    maskG = cv2.inRange(img, low_g, high_g)
    # Detect the centers and radiuses of circles
    red_circles = detectCircle(maskR, "red detection",0)
    blue_circles = detectCircle(maskB, "blue detection",0)
    green_circles = detectCircle(maskG, "green detection",0)
    # Create matrix that contains the real holds positions; fine tune the offset/spacing
    # parameters above depending on the screen
    holds_pos = np.zeros((18,11,2))
    for j in range(0,18):
        for i in range(0,11):
            holds_pos[j,i,1] = xoff+x_sep*i
            holds_pos[j,i,0] = yoff+y_sep*j
            # ******** Uncomment the line below to debug and fine tune the parameters ********
            #cv2.circle(img, (xoff+x_sep*i,yoff+y_sep*j),2,(255,0,0),3)
    # ******** Uncomment the line below to debug and fine tune the parameters ********
    #cv2.imshow('moon', img)
    # Assign to each found circle the closest real hold.
    # detectCircle returns [] (a list) when nothing was detected, an ndarray otherwise,
    # hence the `type(...) is not list` checks below.
    r_hold = []
    b_hold = []
    g_hold = []
    if type(red_circles) is not list:
        for i in range(0,red_circles.shape[1]):
            hold = closestHold(red_circles[0,i,1],red_circles[0,i,0], holds_pos)
            r_hold.append(hold)
    if type(blue_circles) is not list:
        for i in range(0,blue_circles.shape[1]):
            hold = closestHold(blue_circles[0,i,1],blue_circles[0,i,0], holds_pos)
            b_hold.append(hold)
    if type(green_circles) is not list:
        for i in range(0,green_circles.shape[1]):
            hold = closestHold(green_circles[0,i,1],green_circles[0,i,0], holds_pos)
            g_hold.append(hold)
    # Write the results in a JSON file (accumulated in json_decoded, dumped after the loop)
    writeJSONfile(n, r_hold, g_hold, b_hold, bench_check, text, json_decoded)
    # Swipe left in the emulator to show the next problem
    nextProblem(ss_region_tl_x, ss_region_tl_y, ss_region_br_x, ss_region_br_y, SCREEN_WIDTH, SCREEN_HEIGHT)
    print(n)
    #cv2.waitKey(0)
# Write everything in one json file
with open(json_path, 'w') as json_file:
    json.dump(json_decoded, json_file, sort_keys=True, indent=4, separators=(',', ': '))