Commit: first commit

bamcasa committed Oct 31, 2020
0 parents, commit 4125736

Showing 6 changed files with 382 additions and 0 deletions.
22 changes: 22 additions & 0 deletions convert_grayscale.py
@@ -0,0 +1,22 @@
import cv2,os

directory = "crop_image"

path, dirs, files = next(os.walk(directory))
print(len(files))

for file in files:
    img = cv2.imread(f"{directory}/{file}", cv2.IMREAD_COLOR)

    height, width, channel = img.shape

    # Binarize in place: pixels whose B+G+R sum exceeds 500 become white, everything else black.
    for i in range(height):
        for j in range(width):
            if img.item(i, j, 0) + img.item(i, j, 1) + img.item(i, j, 2) > 500:
                for m in range(3):
                    img.itemset(i, j, m, 255)
            else:
                for m in range(3):
                    img.itemset(i, j, m, 0)
    print(file)
    cv2.imwrite(f"{directory}/{file}", img)
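The per-pixel loop above is simple but slow on larger crops. Below is a minimal vectorized sketch of the same rule (a pixel whose B+G+R sum exceeds 500 becomes white, everything else black), assuming the same `crop_image` layout; the helper name `binarize_dir` is illustrative, not part of this commit:

```python
import os

import cv2
import numpy as np


def binarize_dir(directory="crop_image", threshold=500):
    """Set pixels whose B+G+R sum exceeds `threshold` to white, everything else to black."""
    _, _, files = next(os.walk(directory))
    for file in files:
        img = cv2.imread(f"{directory}/{file}", cv2.IMREAD_COLOR)
        if img is None:  # skip non-image files
            continue
        mask = img.sum(axis=2) > threshold  # True where the channel sum is bright enough
        img[mask] = 255                     # white
        img[~mask] = 0                      # black
        cv2.imwrite(f"{directory}/{file}", img)


if __name__ == "__main__":
    binarize_dir()
```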
168 changes: 168 additions & 0 deletions crop.py
@@ -0,0 +1,168 @@
#!/usr/bin/env python
# coding: utf-8

# In[1]:


import cv2
import numpy as np
import matplotlib.pyplot as plt
import os

plt.style.use('dark_background')



# # Read Input Image

# In[3]:


path, dirs, files = next(os.walk("test1"))
print(len(files))

for file in files:
    img_ori = cv2.imread(f'test1/{file}')

    height, width, channel = img_ori.shape

    # plt.figure(figsize=(12, 10))
    # cv2.imshow("img_ori", img_ori)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # # Convert Image to Grayscale

    # In[4]:

    # hsv = cv2.cvtColor(img_ori, cv2.COLOR_BGR2HSV)
    # gray = hsv[:,:,2]
    gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)

    # cv2.imshow("gray", gray)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # # Maximize Contrast (Optional)

    # In[5]:

    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, structuringElement)
    imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, structuringElement)

    imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)
    gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

    # cv2.imshow("gray", gray)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # # Adaptive Thresholding

    # In[6]:

    img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)

    img_thresh = cv2.adaptiveThreshold(
        img_blurred,
        maxValue=255.0,
        adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv2.THRESH_BINARY_INV,
        blockSize=19,
        C=9
    )

    # cv2.imshow("gray", gray)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # # Find Contours

    # In[ ]:

    contours, _ = cv2.findContours(
        img_thresh,
        cv2.RETR_LIST,
        method=cv2.CHAIN_APPROX_SIMPLE
    )

    temp_result = np.zeros((height, width, channel), dtype=np.uint8)

    cv2.drawContours(temp_result, contours=contours, contourIdx=-1, color=(255, 255, 255))

    # cv2.imshow("temp_result", temp_result)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # # Prepare Data

    # In[10]:

    temp_result = np.zeros((height, width, channel), dtype=np.uint8)

    contours_dict = []

    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(temp_result, pt1=(x, y), pt2=(x + w, y + h), color=(255, 255, 255), thickness=2)

        # store each bounding box and its center point
        contours_dict.append({
            'contour': contour,
            'x': x,
            'y': y,
            'w': w,
            'h': h,
            'cx': x + (w / 2),
            'cy': y + (h / 2)
        })

    # cv2.imshow("temp_result", temp_result)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    MIN_AREA = 80
    MIN_WIDTH, MIN_HEIGHT = 4, 4
    MIN_RATIO, MAX_RATIO = 0.3, 5.0
    # TODO: these threshold values still need tuning <----------------------------------------------------------------

    possible_contours = []

    cnt = 0
    for d in contours_dict:
        area = d['w'] * d['h']
        ratio = d['w'] / d['h']

        if area > MIN_AREA and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT and MIN_RATIO < ratio < MAX_RATIO:
            d['idx'] = cnt
            cnt += 1
            possible_contours.append(d)

    # visualize possible contours

    dst = []

    for d in possible_contours:
        # cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
        if d['h'] >= 40 and d['w'] >= 40:
            dst.append(img_ori[d['y']:d['y'] + d['h'], d['x']:d['x'] + d['w']])

    number = 1
    for img in dst:
        cv2.imwrite(f"crop_image/{number}_{file}", img)
        print(f"crop_image/{number}_{file}")
        number += 1

    # for d in possible_contours:
    #     cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
    #     cv2.rectangle(img_ori, pt1=(d['x'], d['y']), pt2=(d['x'] + d['w'], d['y'] + d['h']),
    #                   color=(211, 200, 86), thickness=2)

    # cv2.imshow("img_ori", img_ori)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()



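The size and ratio thresholds in crop.py are marked above as still needing tuning. Below is a small sketch of the same filter pulled out into one helper, so the cut-offs can be adjusted in a single place; `is_possible_char` and its parameter names are illustrative, not part of this commit:

```python
def is_possible_char(d, min_area=80, min_wh=(4, 4), ratio_range=(0.3, 5.0)):
    """Mirror of the inline filter in crop.py: keep boxes that are large enough
    and whose width/height ratio looks like a single character."""
    area = d['w'] * d['h']
    ratio = d['w'] / d['h']
    return (area > min_area
            and d['w'] > min_wh[0]
            and d['h'] > min_wh[1]
            and ratio_range[0] < ratio < ratio_range[1])


possible_contours = []
for idx, d in enumerate(filter(is_possible_char, contours_dict)):
    d['idx'] = idx
    possible_contours.append(d)
```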
47 changes: 47 additions & 0 deletions division.py
@@ -0,0 +1,47 @@
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import os

with open("model_labels.txt", "r", encoding="utf-8") as f:
    lines = f.readlines()
# for line in lines:
#     print(line.strip())

path, dirs, files = next(os.walk("crop_image"))

# Disable scientific notation for clarity
np.set_printoptions(suppress=True)

# Load the model
model = tensorflow.keras.models.load_model('keras_model.h5')

# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

for file in files:
    image = Image.open(f'crop_image/{file}')
    original_image = Image.open(f'crop_image/{file}')

    size = (224, 224)
    image = ImageOps.fit(image, size, Image.ANTIALIAS)

    # image.show()
    # original_image.show()

    image_array = np.asarray(image)

    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

    data[0] = normalized_image_array

    prediction = model.predict(data)
    print(f"{file}", lines[prediction.argmax()].strip(), prediction.max())

    # Save the crop into a per-label folder, but only for confident (>= 0.9) predictions.
    if prediction.max() >= 0.9:
        label = lines[prediction.argmax()].strip().split()[1]
        if not os.path.isdir(f"Classified_image/{label}"):
            os.mkdir(f"Classified_image/{label}")
        original_image.save(f"Classified_image/{label}/{file}")
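division.py re-derives the label from the raw line of model_labels.txt each time it is needed. A short sketch of parsing the file once into an index-to-jamo list, assuming the `index label` format shown in model_labels.txt below; the `load_labels` name is illustrative, not part of this commit:

```python
def load_labels(path="model_labels.txt"):
    """Return a list where position i holds the jamo for class index i,
    e.g. the line '0 ㄱ' yields labels[0] == 'ㄱ'."""
    with open(path, "r", encoding="utf-8") as f:
        return [line.strip().split(maxsplit=1)[1] for line in f if line.strip()]


labels = load_labels()
# label = labels[prediction.argmax()]  # replaces lines[prediction.argmax()].strip().split()[1]
```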
Binary file added keras_model.h5
Binary file not shown.
18 changes: 18 additions & 0 deletions model_labels.txt
@@ -0,0 +1,18 @@
0 ㄱ
1 ㄴ
2 ㄷ
3 ㄹ
4 ㅁ
5 ㅂ
6 ㅅ
7 ㅇ
8 ㅈ
9 ㅊ
10 ㅋ
11 ㅌ
12 ㅍ
13 ㅎ
14 ㅏ
15 ㅓ
16 ㅗ
17 ㅜ
127 changes: 127 additions & 0 deletions old_division.py
@@ -0,0 +1,127 @@
import time
start = time.time()

import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import random
import cv2

np.set_printoptions(suppress=True)

# Load one model per character of the sentence 다람쥐헌쳇바퀴에타고파 into a list
model = np.arange(11, dtype=object)
image = np.arange(11, dtype=object)

text = "다람쥐헌쳇바퀴에타고파"
text1 = "abcdefghijk"

with open("model/text.txt", 'r', encoding="utf-8") as f:
    line = f.readline()
lines = list(line.replace(" ", ""))

print(lines)
save_name = []
name = 0


for i in range(len(lines)):
    a = lines[i]
    uni = a.encode("unicode_escape")
    uni = str(uni)
    uni = uni.replace("b'\\\\u", "").replace("'", "")
    uni = uni.upper()
    save_name.append(uni)


for i in range(11):
    model[i] = tensorflow.keras.models.load_model(f"model/{text[i]}.h5")  # load the model file for this syllable
    image[i] = Image.open(f"crop_image/{i}.jpg")  # load the cropped syllable image


data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

max_x_y = [0,0]

ah = 0

for j in range(11):
    max = [0, 0, 0]
    max_i = [0, 0, 0]
    # if j != 6:
    #     time = 1
    # else:
    #     time = 1000
    for i in range(1501):
        size = random.randrange(80, 120)
        x = random.randrange(0, 224 - size)
        y = random.randrange(0, 224 - size)
        cropImage = image[j].crop((x, y, x + size, y + size))
        cropImage.save(f"glyphs/{i}.jpg")

        # resize to 224x224
        size2 = (224, 224)
        cropImage = ImageOps.fit(cropImage, size2, Image.ANTIALIAS)

        # convert the image to a numpy array
        image_array = np.asarray(cropImage)

        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

        data[0] = normalized_image_array

        prediction = model[j].predict(data)
        """
        if j == 3:
            prediction[0][1] = prediction[0][1] + prediction[0][2]
            prediction[0][2] = prediction[0][3]
        """

        # print(f"{j} {i} : {prediction}")
        print(f"{j} {i} : {prediction}")
        if prediction[0][0] > max[0]:
            max[0] = prediction[0][0]
            max_i[0] = i
            if j == 2 or j == 6:
                max_x_y[0] = x + size
                max_x_y[1] = y + size
        if prediction[0][1] > max[1]:
            max[1] = prediction[0][1]
            max_i[1] = i
        if prediction[0][2] > max[2]:
            max[2] = prediction[0][2]
            max_i[2] = i

    for i in range(3):
        # for these syllables only the first two classes are saved
        if (j == 0 or j == 5 or j == 7 or j == 8 or j == 9 or j == 10) and i == 2:
            break
        if j == 2 and i == 1:
            img = Image.open('crop_image/2.jpg')
            px = img.load()
            # white out the region recorded in max_x_y before saving the rest of the syllable
            for k in range(0, max_x_y[0]):
                for l in range(0, max_x_y[1]):
                    px[k, l] = (255, 255, 255)
            img.save(f'correct_image/{save_name[name]}.jpg')
            name += 1
            break
        if j == 6 and i == 1:
            img = Image.open('crop_image/6.jpg')
            px = img.load()
            for k in range(0, max_x_y[0]):
                for l in range(0, max_x_y[1]):
                    px[k, l] = (255, 255, 255)
            img.save(f'correct_image/{save_name[name]}1.jpg')
            name += 1
            break
        # save the vowel ㅏ only once; later occurrences are skipped
        if lines[name] == "ㅏ" and ah >= 1:
            break
        elif lines[name] == "ㅏ" and ah == 0:
            ah += 1
        correct_image = Image.open(f"glyphs/{max_i[i]}.jpg")
        correct_image.save(f"correct_image/{save_name[name]}.jpg")
        name += 1
        print(max_i[i], max[i])
        # correct_image.show()

print("Elapsed time :", time.time() - start)

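The core idea of old_division.py is a brute-force search: for each syllable, sample many random square crops, score them with that syllable's model, and keep the best-scoring crop per class. Below is a condensed sketch of just that search, under the same 224x224 size and the `/127.0 - 1` normalization used above; `best_crops_for` is an illustrative name, not part of this commit:

```python
import random

import numpy as np
from PIL import Image, ImageOps


def best_crops_for(image, model, n_classes, n_samples=1500):
    """Return (best_score, best_crop) for each class of `model`, found by scoring
    random square crops of `image` (a 224x224 PIL image)."""
    best = [(0.0, None)] * n_classes
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    for _ in range(n_samples):
        size = random.randrange(80, 120)
        x = random.randrange(0, 224 - size)
        y = random.randrange(0, 224 - size)
        crop = image.crop((x, y, x + size, y + size))
        fitted = ImageOps.fit(crop, (224, 224))
        data[0] = np.asarray(fitted).astype(np.float32) / 127.0 - 1
        scores = model.predict(data)[0]
        for c in range(n_classes):
            if scores[c] > best[c][0]:
                best[c] = (float(scores[c]), crop)
    return best
```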