-
Notifications
You must be signed in to change notification settings - Fork 1
/
b_opencv_driver.py
458 lines (360 loc) · 14.4 KB
/
b_opencv_driver.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
import cv2
import numpy as np
import logging
import math
import utils.motor_lib.driver as driver
import b_overrides as ovr
import time
import datetime
import sys
def gstreamer_pipeline(
    sensor_id=0,
    capture_width=1920,
    capture_height=1080,
    display_width=960,
    display_height=540,
    framerate=30,
    flip_method=0,
):
    """Return a GStreamer pipeline description for a Jetson CSI camera.

    The nvargus capture branch runs at (capture_width, capture_height) in
    NVMM memory; nvvidconv flips/scales it down to a BGR appsink at
    (display_width, display_height) for OpenCV consumption.
    """
    template = (
        "nvarguscamerasrc sensor-id=%d !"
        "video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
    )
    fields = (
        sensor_id,
        capture_width,
        capture_height,
        framerate,
        flip_method,
        display_width,
        display_height,
    )
    return template % fields
# --- Tuning constants (adjust per track / vehicle) ---------------------------
BASE_SPEED = 100  # base motor speed (0-100) fed into throttle_angle_to_thrust
_SHOW_IMAGE = False  # when True, show_image() opens cv2 debug windows per stage
STEP_SIZE = 5 # DEFAULT 5 - the less steps, the more accurate the obstacle detection
MIN_DISTANCE = 250 # DEFAULT 250 - the distance to consider an obstacle (the larger the number the closer the obstacle is)
MIN_UTURN_THRESHOLD = 300 # DEFAULT 400 - the distance to consider a u-turn (the larger the number the closer the u-turn has to be)
FEELER_AMOUNT = 5 # MINIMUM OF 5 - the amount of feelers to use (the more feelers the more accurate the obstacle detection)
MIDDLE_ANGLE = 635 # DEFAULT 635 - use the included reference_feeler to determine the middle angle
class B_OpenCV_Driver(object):
    """Lane follower: detects lane lines in each frame, derives a steering
    angle, and commands the differential-drive motors accordingly."""

    def __init__(self, car=None):
        logging.info('Creating a B_OpenCV_Driver...')
        self.car = car
        self.curr_steering_angle = 90  # 90 degrees == straight ahead

    def follow_lane(self, frame):
        """Process one frame.

        Returns (annotated frame, current steering angle in degrees).
        """
        show_image("orig", frame)
        lane_lines, annotated = detect_lane(frame)
        final_frame = self.steer(annotated, lane_lines)
        return final_frame, self.curr_steering_angle

    def steer(self, frame, lane_lines):
        """Update the stabilized steering angle from the detected lane lines
        and write the resulting thrust values to the motor driver."""
        logging.debug('steering...')
        if not lane_lines:
            logging.error('No lane lines detected, nothing to do.')
            return frame
        proposed_angle = compute_steering_angle(frame, lane_lines)
        self.curr_steering_angle = stabilize_steering_angle(
            self.curr_steering_angle, proposed_angle, len(lane_lines))
        pwm = throttle_angle_to_thrust(BASE_SPEED, self.curr_steering_angle - 90)
        # Actually write the (left, right) values to the motors.
        driver.move(pwm[0], pwm[1])
        print(f"Calculated Steering Angle: {self.curr_steering_angle}")
        heading_image = display_heading_line(frame, self.curr_steering_angle)
        show_image("heading", heading_image)
        return heading_image
############################
# Frame processing steps
############################
def detect_lane(frame):
    """Full lane-detection pipeline for one frame.

    Color/edge filtering -> bottom-half ROI crop -> Hough segments ->
    averaging into at most two lane lines.

    Returns (lane_lines, frame annotated with the lane lines).
    """
    logging.debug('detecting lane lines...')
    edges = detect_edges(frame)
    show_image('edges', edges)
    cropped = region_of_interest(edges)
    show_image('edges cropped', cropped)
    segments = detect_line_segments(cropped)
    show_image("line segments", display_lines(frame, segments))
    lane_lines = average_slope_intercept(frame, segments)
    annotated = display_lines(frame, lane_lines)
    show_image("lane lines", annotated)
    return lane_lines, annotated
def detect_edges(frame):
    """Mask the lane-marker colors in HSV space and run Canny on the mask."""
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    show_image("hsv", hsv)
    # HSV thresholds tuned for this track's markers.
    # NOTE(review): the "yellow" range (H 0-53, S 0-76, V 255 only) looks like
    # a bright/near-white filter rather than yellow - confirm against track footage.
    blue_lo, blue_hi = np.array([93, 132, 0]), np.array([113, 255, 255])
    yellow_lo, yellow_hi = np.array([0, 0, 255]), np.array([53, 76, 255])
    mask_blue = cv2.inRange(hsv, blue_lo, blue_hi)
    mask_yellow = cv2.inRange(hsv, yellow_lo, yellow_hi)
    show_image("blue mask", mask_blue)
    show_image("yellow mask", mask_yellow)
    combined_mask = cv2.bitwise_or(mask_blue, mask_yellow)
    #cv2.imwrite("camera.test.png", combined_mask)
    return cv2.Canny(combined_mask, 200, 400)
def detect_edges_old(frame):
    """Legacy edge detector kept for HSV-threshold debugging.

    Sweeps the saturation lower bound upward in steps of 16, showing each
    mask; note that Canny is computed from the LAST mask of the sweep only
    (saturation >= 240).
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    show_image("hsv", hsv)
    for step in range(16):
        mask = cv2.inRange(hsv,
                           np.array([30, 16 * step, 0]),
                           np.array([150, 255, 255]))
        show_image("blue mask Sat=%s" % (16 * step), mask)
    # Hue sweep retained from the original experiments, still disabled:
    #for i in range(16):
    #    lower_blue = np.array([16 * i, 40, 50])
    #    upper_blue = np.array([150, 255, 255])
    #    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    #    show_image("blue mask hue=%s" % (16 * i), mask)
    edges = cv2.Canny(mask, 200, 400)
    return edges
def region_of_interest(canny):
    """Zero out everything above the vertical midpoint of the edge image.

    The camera points forward, so lane markings only appear in the bottom
    half of the view.
    """
    height, width = canny.shape
    mask = np.zeros_like(canny)
    bottom_half = np.array([[
        (0, height * 1 / 2),
        (width, height * 1 / 2),
        (width, height),
        (0, height),
    ]], np.int32)
    cv2.fillPoly(mask, bottom_half, 255)
    show_image("mask", mask)
    return cv2.bitwise_and(canny, mask)
def detect_line_segments(cropped_edges):
    """Run a probabilistic Hough transform over the cropped edge image.

    Returns the raw cv2.HoughLinesP output (None when nothing is found).
    min_threshold / minLineLength / maxLineGap were tuned by trial and error.
    """
    rho = 1  # distance resolution: 1 pixel
    angle = np.pi / 180  # angular resolution: 1 degree
    min_threshold = 10  # minimum number of votes
    segments = cv2.HoughLinesP(cropped_edges, rho, angle, min_threshold,
                               np.array([]), minLineLength=8, maxLineGap=4)
    if segments is not None:
        for seg in segments:
            logging.debug('detected line_segment:')
            logging.debug("%s of length %s" % (seg, length_of_line_segment(seg[0])))
    return segments
def average_slope_intercept(frame, line_segments):
    """
    This function combines line segments into one or two lane lines
    If all line slopes are < 0: then we only have detected left lane
    If all line slopes are > 0: then we only have detected right lane
    """
    lane_lines = []
    if line_segments is None:
        logging.info('No line_segment segments detected')
        return lane_lines
    height, width, _ = frame.shape
    boundary = 1 / 3
    # Left-lane segments must lie entirely in the left 2/3 of the screen;
    # right-lane segments entirely in the right 2/3.
    left_region_boundary = width * (1 - boundary)
    right_region_boundary = width * boundary
    left_fit, right_fit = [], []
    for line_segment in line_segments:
        for x1, y1, x2, y2 in line_segment:
            if x1 == x2:
                logging.info('skipping vertical line segment (slope=inf): %s' % line_segment)
                continue
            slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
            if slope < 0:
                if x1 < left_region_boundary and x2 < left_region_boundary:
                    left_fit.append((slope, intercept))
            elif x1 > right_region_boundary and x2 > right_region_boundary:
                right_fit.append((slope, intercept))
    if left_fit:
        lane_lines.append(make_points(frame, np.average(left_fit, axis=0)))
    if right_fit:
        lane_lines.append(make_points(frame, np.average(right_fit, axis=0)))
    logging.debug('lane lines: %s' % lane_lines)  # [[[316, 720, 484, 432]], [[1009, 720, 718, 432]]]
    return lane_lines
def compute_steering_angle(frame, lane_lines):
    """ Find the steering angle based on lane line coordinate
    We assume that camera is calibrated to point to dead center

    Returns degrees: 90 is straight, <90 steers left, >90 steers right;
    -90 is the sentinel for "no lane lines detected".
    """
    if not lane_lines:
        logging.info('No lane lines detected, do nothing')
        return -90
    height, width, _ = frame.shape
    if len(lane_lines) == 1:
        # Single lane line: steer parallel to it.
        logging.debug('Only detected one lane line, just follow it. %s' % lane_lines[0])
        x1, _, x2, _ = lane_lines[0][0]
        x_offset = x2 - x1
    else:
        # Two lane lines: aim at their midpoint, biased by the camera offset.
        _, _, left_x2, _ = lane_lines[0][0]
        _, _, right_x2, _ = lane_lines[1][0]
        camera_mid_offset_percent = 0.02  # 0.0: dead center; -0.03: centered to left; +0.03: pointing right
        mid = int(width / 2 * (1 + camera_mid_offset_percent))
        x_offset = (left_x2 + right_x2) / 2 - mid
    # Angle between the navigation direction and the center vertical line.
    y_offset = int(height / 2)
    angle_to_mid_radian = math.atan(x_offset / y_offset)
    angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi)
    steering_angle = angle_to_mid_deg + 90  # steering angle needed by the front wheel
    logging.debug('new steering angle: %s' % steering_angle)
    return steering_angle
def stabilize_steering_angle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=5, max_angle_deviation_one_lane=1):
    """
    Using last steering angle to stabilize the steering angle
    This can be improved to use last N angles, etc
    if new angle is too different from current angle, only turn by max_angle_deviation degrees
    """
    # With both lanes visible the estimate is trustworthy, so allow a larger
    # per-frame correction; with a single lane, clamp much harder.
    if num_of_lane_lines == 2:
        limit = max_angle_deviation_two_lines
    else:
        limit = max_angle_deviation_one_lane
    deviation = new_steering_angle - curr_steering_angle
    if abs(deviation) <= limit:
        stabilized_steering_angle = new_steering_angle
    else:
        step = limit if deviation > 0 else -limit
        stabilized_steering_angle = int(curr_steering_angle + step)
    logging.info('Proposed angle: %s, stabilized angle: %s' % (new_steering_angle, stabilized_steering_angle))
    return stabilized_steering_angle
############################
# Utility Functions
############################
def throttle_angle_to_thrust(speed, theta):
    """Convert a (speed, steering angle) pair into differential thrust.

    speed: desired forward speed; clamped into [0, 100].
    theta: steering angle in degrees relative to straight ahead
           (negative = left, positive = right); normalized into [-180, 180).

    Returns a (left, right) pair of motor values in [-100, 100].

    Fixes over the original: the bare ``except`` that printed 'error' and
    returned None (making the caller fail later on ``pwm[0]`` with an
    unrelated TypeError) is removed - bad input now raises at the source -
    and the final branch returns a tuple like the other three instead of
    a list.
    """
    theta = ((theta + 180) % 360) - 180  # normalize value to [-180, 180)
    speed = min(max(0, speed), 100)      # normalize value to [0, 100]
    # The primary motor falls off linearly as the turn sharpens within each
    # 90-degree quadrant; the other motor compensates, capped at 100.
    v_a = speed * (45 - theta % 90) / 45  # falloff of main motor
    v_b = min(100, 2 * speed + v_a, 2 * speed - v_a)  # compensation of other motor
    if theta < -90:
        return -v_b, -v_a
    if theta < 0:
        return -v_a, v_b
    if theta < 90:
        return v_b, v_a
    return v_a, -v_b
def display_lines(frame, lines, line_color=(0, 255, 0), line_width=10):
    """Overlay Hough-style lines ([[x1, y1, x2, y2]], ...) on a copy of frame."""
    overlay = np.zeros_like(frame)
    if lines is not None:
        for line in lines:
            for x1, y1, x2, y2 in line:
                cv2.line(overlay, (x1, y1), (x2, y2), line_color, line_width)
    return cv2.addWeighted(frame, 0.8, overlay, 1, 1)
def display_heading_line(frame, steering_angle, line_color=(0, 0, 255), line_width=5, ):
    """Draw the current heading as a line from the bottom-center of the frame.

    Steering angle convention:
      0-89 degrees: turn left; 90: straight; 91-180: turn right.
    (x1, y1) is always the bottom-center of the screen; (x2, y2) is derived
    from the steering angle with a bit of trigonometry.

    Fix over the original: tan(0) == 0 at a steering angle of exactly 0 (and
    180 degrees, whose tan underflows to ~0), which divided by zero; those
    horizontal headings now draw to the left/right frame edge instead.
    """
    heading_image = np.zeros_like(frame)
    height, width, _ = frame.shape
    steering_angle_radian = steering_angle / 180.0 * math.pi
    x1 = int(width / 2)
    y1 = height
    tan_angle = math.tan(steering_angle_radian)
    if tan_angle == 0.0:
        # Hard left (<= 90) or hard right: heading is horizontal.
        x2 = 0 if steering_angle <= 90 else width
    else:
        x2 = int(x1 - height / 2 / tan_angle)
    y2 = int(height / 2)
    cv2.line(heading_image, (x1, y1), (x2, y2), line_color, line_width)
    heading_image = cv2.addWeighted(frame, 0.8, heading_image, 1, 1)
    return heading_image
def length_of_line_segment(line):
    """Return the Euclidean length of a segment given as (x1, y1, x2, y2)."""
    x1, y1, x2, y2 = line
    dx, dy = x2 - x1, y2 - y1
    return math.sqrt(dx ** 2 + dy ** 2)
def show_image(title, frame, show=_SHOW_IMAGE):
    """Debug helper: display frame in a named window when show is truthy."""
    if not show:
        return
    cv2.imshow(title, frame)
def make_points(frame, line):
    """Convert a (slope, intercept) line fit into pixel endpoints.

    Returns [[x1, y1, x2, y2]] spanning from the bottom of the frame (y1)
    up to its vertical midpoint (y2), with x clamped to [-width, 2 * width]
    so extreme slopes stay near the frame.

    Fix over the original: a perfectly horizontal fit (slope == 0) divided
    by zero - callers only filter out vertical segments (x1 == x2), so a
    zero slope was reachable. It is nudged to 0.1 instead of crashing.
    """
    height, width, _ = frame.shape
    slope, intercept = line
    if slope == 0:
        slope = 0.1  # avoid ZeroDivisionError for horizontal fits
    y1 = height  # bottom of the frame
    y2 = int(y1 * 1 / 2)  # make points from middle of the frame down
    # bound the coordinates within the frame
    x1 = max(-width, min(2 * width, int((y1 - intercept) / slope)))
    x2 = max(-width, min(2 * width, int((y2 - intercept) / slope)))
    return [[x1, y1, x2, y2]]
############################
# Test Functions
############################
def test_photo(file):
    """Run the lane follower over a single still image and display the result.

    Fix over the original: follow_lane returns an (annotated frame, angle)
    tuple, but the whole tuple was passed to show_image / cv2.imshow, which
    cannot display it - the frame is now unpacked first. (Also renames the
    'land_follower' typo.)
    """
    lane_follower = B_OpenCV_Driver()
    frame = cv2.imread(file)
    combo_image, _ = lane_follower.follow_lane(frame)
    show_image('final', combo_image, True)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def test_video(video_file):
    """Drive the lane follower over a video file, URL, or camera index.

    Press 'q' in the OpenCV window to quit. The obstacle / u-turn override
    pipeline is kept below, commented out, with the last-known-good HSV
    ranges for re-enabling.

    Fix over the original: the boolean returned by cap.read() was ignored,
    so a None frame at end-of-stream (or a camera hiccup) crashed inside
    follow_lane; reads are now checked and the loop exits cleanly. The
    per-frame obstacle_frame / uturn_frame copies, which were only consumed
    by the commented-out override code, are disabled alongside it.
    """
    lane_follower = B_OpenCV_Driver()
    cap = cv2.VideoCapture(video_file)
    # Skip the first second of video (assumes ~60 fps source).
    for i in range(60):
        if not cap.read()[0]:
            break
    #video_type = cv2.VideoWriter_fourcc(*'XVID')
    #video_overlay = cv2.VideoWriter("%s_overlay.avi" % (video_file), video_type, 20.0, (320, 240))
    try:
        i = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret or frame is None:
                # End of stream: stop instead of handing None to follow_lane.
                break
            print('frame %s' % i )
            combo_image, currentAngle = lane_follower.follow_lane(frame)
            cv2.imwrite("camera.test.png", combo_image)
            # --- obstacle / u-turn overrides (disabled) ----------------------
            # hsv for purple boxes
            #lower_purple = np.array([141, 59, 0])  # <- campusData.mp4
            #upper_purple = np.array([179, 255, 255])
            # school track with new camera
            #lower_purple = np.array([121, 96, 0])
            #upper_purple = np.array([179, 255, 255])
            # hsv for blue and yellow
            #lower_blue = np.array([0, 76, 158])
            #upper_blue = np.array([179, 255, 255])
            #lower_yellow = np.array([141, 59, 0])
            #upper_yellow = np.array([179, 255, 255])
            #obstacle_frame = frame.copy()
            #uturn_frame = frame.copy()
            #hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            #obstacle_mask = cv2.inRange(hsv, lower_purple, upper_purple)
            #obstacle_blur = cv2.bilateralFilter(obstacle_mask, 9, 40, 40)
            #obstacle_edges = cv2.Canny(obstacle_blur, 50, 100)
            #uturn_mask_b = cv2.inRange(hsv, lower_blue, upper_blue)
            #uturn_mask_y = cv2.inRange(hsv, lower_yellow, upper_yellow)
            #uturn_mask = cv2.bitwise_or(uturn_mask_b, uturn_mask_y)
            #uturn_blur = cv2.bilateralFilter(uturn_mask_b, 9, 40, 40)
            #uturn_edges = cv2.Canny(uturn_blur, 50, 100)
            # overrides for the current angle
            #adj_ut, uturn_frame = ovr.detect_uturns(uturn_edges, uturn_frame)
            #adj_ob, obstacle_frame = ovr.detect_obstacles(obstacle_edges, obstacle_frame)
            #currentAngle += adj_ut + adj_ob
            #currentAngle += adj_ob
            #print(f"Angle Adjustment: {adj_ob}")
            #time.sleep(0.01)
            #pwm = throttle_angle_to_thrust(BASE_SPEED, currentAngle - 90)
            #print(f"Left: {pwm[0]}, Right: {pwm[1]}")
            #driver.move(pwm[0], pwm[1])
            #cv2.imwrite("%s_%03d_%03d.png" % (video_file, i, lane_follower.curr_steering_angle), frame)
            #cv2.imwrite("%s_overlay_%03d.png" % (video_file, i), combo_image)
            #video_overlay.write(combo_image)
            #cv2.imshow("Road with Lane line", combo_image)
            i += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        cap.release()
        #video_overlay.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Entry point: runs the follower live on the default camera (index 0)
    # and drives the motors - only run on the vehicle. Alternative inputs
    # (still photo, GStreamer CSI pipeline, IP stream, recorded track video)
    # are kept commented below for bench testing.
    logging.basicConfig(level=logging.ERROR)
    #test_photo('/home/pi/DeepPiCar/driver/data/video/car_video_190427_110320_073.png')
    #test_photo(sys.argv[1])
    #test_video(gstreamer_pipeline(flip_method=2))
    test_video(0)
    #test_video("https://192.168.227.41:8080")
    #test_video("data/TestTrack.mp4")