forked from leenachennuru/objRecognition
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsegmentation.py
140 lines (107 loc) · 4.48 KB
/
segmentation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
# Segmentation Approaches for Object Recognition
import numpy as np
import cv2
from matplotlib import pyplot as plt
import cv
import Image
from scipy.ndimage import label
##import pymeanshift as pms
from time import time
from cv import *
def otsuBin(imageGrayInput):
    """Binarize a grayscale image with Otsu's method (inverted binary output)."""
    otsuFlags = cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
    _, binaryImage = cv2.threshold(imageGrayInput, 0, 255, otsuFlags)
    return binaryImage
def meanShift(imageInput):
    """Smooth a BGR image with mean-shift filtering (spatial/color radius 50)."""
    return cv2.pyrMeanShiftFiltering(imageInput, 50, 50)
def cannyEdge(imageInput):
    """Run Canny edge detection (thresholds 50/50) on a BGR image.

    Returns the single-channel edge map.
    """
    # BUG FIX: the original converted the module-level global `inputImage`
    # instead of the `imageInput` parameter, silently ignoring the argument.
    grayScaleInput = cv2.cvtColor(imageInput, cv2.COLOR_BGR2GRAY)
    edgeDetection = cv2.Canny(grayScaleInput, 50, 50)
    return edgeDetection
def adapThresh(imageInput):
    """Adaptive-mean threshold a BGR image (blockSize 11, C 2).

    Returns the single-channel thresholded image.
    """
    # BUG FIX: the original converted the module-level global `inputImage`
    # instead of the `imageInput` parameter, silently ignoring the argument.
    grayScaleInput = cv2.cvtColor(imageInput, cv2.COLOR_BGR2GRAY)
    threshAdaptive = cv2.adaptiveThreshold(grayScaleInput, 255, 1, 1, 11, 2)
    return threshAdaptive
def contourFind(prepImage):
    """Find contours on a binary/preprocessed image.

    Returns the (contours, hierarchy) pair from cv2.findContours
    (OpenCV 2.x two-value signature).
    """
    found = cv2.findContours(prepImage, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = found
    return contours, hierarchy
def featureDetectDesORB(roiImageFiltered):
    """Detect ORB keypoints and descriptors; also draw them in blue.

    Returns (keypoints, descriptors, image-with-keypoints-drawn).
    Uses the legacy OpenCV 2.x `cv2.ORB()` constructor.
    """
    orbDetector = cv2.ORB()
    keyPoints, descriptors = orbDetector.detectAndCompute(roiImageFiltered, None)
    drawnImage = cv2.drawKeypoints(roiImageFiltered, keyPoints, None, (255, 0, 0), 2)
    return keyPoints, descriptors, drawnImage
def featureDescriptorORB(roiImageFiltered, kp):
    """Compute ORB descriptors for already-detected keypoints; draw them in green.

    Returns (keypoints, descriptors, image-with-keypoints-drawn).
    """
    orbExtractor = cv2.ORB()
    kp, descriptors = orbExtractor.compute(roiImageFiltered, kp)
    drawnImage = cv2.drawKeypoints(roiImageFiltered, kp, color=(0, 255, 0), flags=0)
    return kp, descriptors, drawnImage
def featureDetectCorner(roiImageFiltered):
    """Detect FAST corner keypoints; draw them in blue.

    Returns (keypoints, image-with-keypoints-drawn).
    Uses the legacy OpenCV 2.x `cv2.FastFeatureDetector()` constructor.
    """
    fastDetector = cv2.FastFeatureDetector()
    keyPoints = fastDetector.detect(roiImageFiltered, None)
    drawnImage = cv2.drawKeypoints(roiImageFiltered, keyPoints, color=(255, 0, 0))
    return keyPoints, drawnImage
# to be ignored function
def featureDetectDesSIFT(roiImageFiltered):
    """Detect keypoints and compute descriptors with the legacy factory API.

    NOTE(review): despite the SIFT name, both factories request "SURF" —
    kept as-is to preserve the existing behavior; confirm which was intended.
    Returns (keypoints, descriptors).
    """
    detector = cv2.FeatureDetector_create("SURF")
    descriptor = cv2.DescriptorExtractor_create("SURF")
    kp = detector.detect(roiImageFiltered)
    kp, des = descriptor.compute(roiImageFiltered, kp)
    # BUG FIX: the original computed kp/des and then fell off the end of the
    # function, implicitly returning None to every caller.
    return kp, des
def featureDetectDesCorner(roiImageFiltered):
    """Detect FAST corner keypoints; draw them in blue.

    NOTE: this is functionally identical to featureDetectCorner (FAST only
    detects — it has no descriptor stage). Returns (keypoints, drawn image).
    """
    detector = cv2.FastFeatureDetector()
    foundKeyPoints = detector.detect(roiImageFiltered, None)
    annotated = cv2.drawKeypoints(roiImageFiltered, foundKeyPoints, color=(255, 0, 0))
    return foundKeyPoints, annotated
# --- Script driver: run each segmentation approach on a sample image ---
inputImage = cv2.imread('TrainingSet/TrainingSetBelow/BlueApple_55_inpImg.png')
# inputImage = cv2.imread('Lenna.png')
grayScaleInput = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)

# Otsu Binarization
outputOtsu = otsuBin(grayScaleInput)

# Mean Shift Prefiltering
outputMeanShift = meanShift(inputImage)

# Mean Shift Prefiltering followed by Otsu
grayMeanShift = cv2.cvtColor(outputMeanShift, cv2.COLOR_BGR2GRAY)
outputMeanShiftOtsu = otsuBin(grayMeanShift)

# Canny Edge Detection
cannyEdges = cannyEdge(inputImage)

# Adaptive thresholding
threshAdaptive = adapThresh(inputImage)

# Finding contours on the mean-shift + Otsu preprocessed image
contours, hierarchy = contourFind(outputMeanShiftOtsu)
inputImageCopy = inputImage.copy()
# BUG FIX: the original drew the contours onto `inputImage` itself, mutating
# the source image and leaving the freshly made copy unused; draw on the copy.
contourImage = cv2.drawContours(inputImageCopy, contours, -1, (0, 255, 0), -1)

boundBoxContour = grayScaleInput.copy()
imgHeight, imgWidth = grayScaleInput.shape[:2]
extendBBox = 20  # margin (px) added around each bounding box for the ROI
for counter, cnt in enumerate(contours):
    # Bounding box around the contour
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(boundBoxContour, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # The expanded bounding box is the region of interest for the object.
    # BUG FIX: clamp the expanded box to the image bounds — a negative start
    # index would wrap around in numpy slicing and produce a nonsense ROI.
    y0 = max(0, y - extendBBox)
    x0 = max(0, x - extendBBox)
    y1 = min(imgHeight, y + h + extendBBox)
    x1 = min(imgWidth, x + w + extendBBox)
    roiImage = grayScaleInput[y0:y1, x0:x1]
    # Median filtering on the object proposal / ROI
    roiImageFiltered = cv2.medianBlur(roiImage, 3)
    # Extract FAST corner features from the object and save the visualization
    kp, drawnKeyPoints = featureDetectCorner(roiImageFiltered)
    cv2.imwrite('kp' + str(counter) + '.png', drawnKeyPoints)
# kp, drawnKeyPoints = featureDetectCorner(grayScaleInput)

# Save all intermediate outputs
cv2.imwrite('otsuOutput.png', outputOtsu)
cv2.imwrite('meanShift.png', outputMeanShift)
cv2.imwrite('meanShiftOtsu.png', outputMeanShiftOtsu)
cv2.imwrite('cannyOutput.png', cannyEdges)
cv2.imwrite('adaptiveThres.png', threshAdaptive)

# Display the outputs until Esc (key code 27) is pressed
while 1:
    cv2.imshow('otsu', outputOtsu)
    cv2.imshow('meanShift', outputMeanShift)
    cv2.imshow('meanShiftOtsu', outputMeanShiftOtsu)
    cv2.imshow('cannyEdgeDetection', cannyEdges)
    cv2.imshow('adaptiveThresh', threshAdaptive)
    cv2.imshow('contourImage', boundBoxContour)
    k = cv2.waitKey(5)
    if k == 27:
        break
cv2.destroyAllWindows()