"""
This program is the Version 2 of the Line following, Here we use Video of arena instead of images of arena
Points to be noted :
- First the arena must be placed in front of the camera module before the program executes
- This program is made with the assumption that the arena is for Robocon 2017
"""
import numpy as np
import cv2
def createLineIterator(P1, P2, img):
"""
Produces an array that consists of the coordinates and intensities of each pixel in a line between 2 points
Parameters :
-P1 : array with first point(x, y)
-P2 : array with second point(x, y)
-img: image
"""
#define Local Variables
imageH = img.shape[0] #Stores the width of the image (frame)
imageW = img.shape[1] #Stores the height of the image (frame)
P1X = P1[0]
P2X = P2[0]
P1Y = P1[1]
P2Y = P2[1]
#difference between local and absolute difference between points
dX = P2X - P1X
dY = P2Y - P1Y
dXa = abs(dX)
dYa = abs(dY)
#Predefine numpy array for output based on the distance between points
itBuffer = np.empty(shape = (np.maximum(dYa, dXa), 3), dtype=np.float32)
itBuffer.fill(np.nan)
#Obtain coordinates along the line using a form of Bresenham's algo
negY = P1Y > P2Y
negX = P1X > P2Y
if P1Y == P2Y: #Horizontal line segment
itBuffer[:,1] = P1Y
if negX:
itBuffer[:,0] = np.arange(P1X-1, P1X-dXa-1, -1)
else:
itBuffer[:,0] = np.arange(P1X+1, P1X+dXa+1)
#Remove points outside image
colX = itBuffer[:,0]
colY = itBuffer[:,1]
itBuffer = itBuffer[ (colX >= 0) & (colY >= 0 ) & (colX < imageW) & (colY < imageH) ]
#Get intensities from img ndarray
itBuffer[:, 2] = img[itBuffer[:,1].astype(np.uint), itBuffer[:,0].astype(np.uint)]
return itBuffer
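
#Illustrative usage of createLineIterator (mirrors the calls made in the main loop below):
#for a binary image thresh1, scanning the row y = 300 from x = 0 to x = 800 looks like
#    row = createLineIterator([0, 300], [800, 300], thresh1)
#where row[:, 0] holds the x coordinates, row[:, 1] the y coordinate (300) and
#row[:, 2] the pixel intensities (0 or 255) along that row.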
"""
Main function begins now
"""
#Without this, numpy truncates long arrays when they are printed (with newer numpy, use threshold=np.inf instead of 'nan')
np.set_printoptions(threshold='nan')
cap = cv2.VideoCapture(1)
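#Request a low capture resolution: property id 3 is the frame width and 4 is the frame height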
cap.set(3, 288)
cap.set(4, 352)
afterJunctionFlag = 1
while True:
    #Capture frame by frame
    ret, frame = cap.read()
    frame = cv2.resize(frame, (800, 600))
    #Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #Blurring the image
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    #Converts the image to a binary image. thresh1 is the binary image
    ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    #ret, thresh1 = cv2.threshold(gray, 143, 255, cv2.THRESH_BINARY)
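    #With THRESH_OTSU the threshold value is computed automatically from the frame's histogram
    #(instead of the fixed 143 in the commented-out alternative), and THRESH_BINARY_INV makes
    #the dark line show up as white (255) in thresh1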
    cv2.imshow('thresh', thresh1)
    #p1-p2 and p3-p4 define two horizontal scan rows (y = 300 and y = 500) whose pixel intensities are sampled
    p1 = [0, 300]
    p2 = [800, 300]
    p3 = [0, 500]
    p4 = [800, 500]
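    #(Assumption: with the camera facing forward, the lower row y = 500 lies closer to the bot
    #than the upper row y = 300, so the two rows give a near and a far sample of the line.)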
    #The return value from the function is the array consisting of [x, y, intensity]
    intensityBuf = np.array(createLineIterator(p1, p2, thresh1))
    intensityBuf1 = np.array(createLineIterator(p3, p4, thresh1))
    #Only the intensities are extracted from the intensityBuf arrays
    onlyIntensity = intensityBuf[:, 2].tolist()
    onlyIntensity1 = intensityBuf1[:, 2].tolist()
    #Get the index of the first white pixel on each scan row (fall back to index 20 if none is found)
    try:
        firstWhiteIndex = onlyIntensity.index(255)
    except ValueError:
        firstWhiteIndex = 20
    try:
        firstWhiteIndex1 = onlyIntensity1.index(255)
    except ValueError:
        firstWhiteIndex1 = 20
    #Search for the first black pixel after the first white pixel (fall back to index 780 if none is found)
    for coord in range(firstWhiteIndex, len(onlyIntensity), 1):
        if onlyIntensity[coord] == 0:
            firstBlackIndex = coord-1
            break
    else:
        firstBlackIndex = 780
    for coord in range(firstWhiteIndex1, len(onlyIntensity1), 1):
        if onlyIntensity1[coord] == 0:
            firstBlackIndex1 = coord-1
            break
    else:
        firstBlackIndex1 = 780
    #Coordinates of the first white pixel (left edge of the white run) on the upper scan row
    p1Cir = intensityBuf[firstWhiteIndex].tolist()
    #Coordinates of the pixel just before the first black pixel (right edge of the white run) on the upper scan row
    p2Cir = intensityBuf[firstBlackIndex].tolist()
    #Coordinates of the first white pixel (left edge of the white run) on the lower scan row
    p3Cir = intensityBuf1[firstWhiteIndex1].tolist()
    #Coordinates of the pixel just before the first black pixel (right edge of the white run) on the lower scan row
    p4Cir = intensityBuf1[firstBlackIndex1].tolist()
    #Detecting the junction: when the white run spans almost the whole scan row, a perpendicular line is being crossed
    if p1Cir[0] < 40 and p2Cir[0] > 600 and afterJunctionFlag == 1:
        print("Junction Detected")
        afterJunctionFlag = 0
    if p1Cir[0] > 40 and p2Cir[0] < 600 and afterJunctionFlag == 0:
        print("Junction Crossed")
        afterJunctionFlag = 1
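    #Steering decision: d1 and d2 measure how far the left and right edges of the line lie from the
    #vertical reference at x = 403; the bot is asked to turn towards the side with the larger share of the line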
    d1 = 403 - p1Cir[0]
    d2 = p2Cir[0] - 403
    if d1 > d2 or d2 < 0:
        print("Bot should move towards left now")
    if d2 > d1 or d1 < 0:
        print("Bot should move towards right now")
    #Draw a circle at the first white pixel of the upper scan row
    gray = cv2.circle(gray, (int(p1Cir[0]), int(p1Cir[1])), 5, (0, 0, 255), -1)
    #Draw a circle at the end of the white run on the upper scan row
    gray = cv2.circle(gray, (int(p2Cir[0]), int(p2Cir[1])), 5, (0, 0, 255), -1)
    #Draw a circle at the first white pixel of the lower scan row
    gray = cv2.circle(gray, (int(p3Cir[0]), int(p3Cir[1])), 5, (0, 0, 255), -1)
    #Draw a circle at the end of the white run on the lower scan row
    gray = cv2.circle(gray, (int(p4Cir[0]), int(p4Cir[1])), 5, (0, 0, 255), -1)
    #Draw a line to divide the frame into two halves
    gray = cv2.line(gray, (400, 0), (400, 600), (0, 0, 0), 3)
    #Display the resulting frame
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
#When everything is done
cap.release()
cv2.destroyAllWindows()