diff --git a/peerjs-config.yaml b/peerjs-config.yaml
deleted file mode 100644
index 2a1b6f2f..00000000
--- a/peerjs-config.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-host: ambianic-pnp.herokuapp.com
-ice_servers:
-- urls:
-  - stun:stun.l.google.com:19302
-log_level: INFO
-port: 443
-secure: true
diff --git a/src/ambianic/pipeline/ai/face_detect.py b/src/ambianic/pipeline/ai/face_detect.py
index f72866de..16602a12 100755
--- a/src/ambianic/pipeline/ai/face_detect.py
+++ b/src/ambianic/pipeline/ai/face_detect.py
@@ -16,10 +16,10 @@ def crop_image(image, box):
         width, height = image.size

         # Setting the points for cropped image
-        left = box['xmin']*width
-        top = box['ymin']*height
-        right = box['xmax']*width
-        bottom = box['ymax']*height
+        left = box[0]*width
+        top = box[1]*height
+        right = box[2]*width
+        bottom = box[3]*height

         # Cropped image of above dimension
         # (It will not change orginal image)
@@ -46,13 +46,9 @@ def process_sample(self, **sample):
             else:
                 # - apply face detection to cropped person areas
                 # - pass face detections on to next pipe element
-                for e_result in prev_inference_result:
-                    label, confidence, box = e_result['label'], \
-                        e_result['confidence'], \
-                        e_result['box']
-
+                for label, confidence, box in prev_inference_result:
                     if label == 'person' and \
-                       confidence >= self._tfengine.confidence_threshold:
+                            confidence >= self._tfengine.confidence_threshold:
                         person_regions.append(box)
                 log.debug('Received %d person boxes for face detection',
                           len(person_regions))
@@ -60,9 +56,6 @@ def process_sample(self, **sample):
                     person_image = self.crop_image(image, box)
                     thumbnail, tensor_image, inference_result = \
                         self.detect(image=person_image)
-
-                    inference_result = self.convert_inference_result(
-                        inference_result)
                     log.debug('Face detection inference_result: %r',
                               inference_result)
                     inf_meta = {
@@ -80,27 +73,3 @@ def process_sample(self, **sample):
                           'Dropping sample: %r',
                           e,
                           sample)
-
-    def convert_inference_result(self, inference_result):
-
-        inf_json = []
-        if inference_result:
-            for inf in inference_result:
-                label, confidence, box = inf[0:3]
-                log.info('label: %s , confidence: %.0f, box: %s',
-                         label,
-                         confidence,
-                         box)
-                one_inf = {
-                    'label': label,
-                    'confidence': float(confidence),
-                    'box': {
-                        'xmin': float(box[0]),
-                        'ymin': float(box[1]),
-                        'xmax': float(box[2]),
-                        'ymax': float(box[3]),
-                    }
-                }
-                inf_json.append(one_inf)
-
-        return inf_json
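With convert_inference_result removed from the AI elements, detections now travel between pipe elements as plain (label, confidence, box) tuples, where box holds normalized [xmin, ymin, xmax, ymax] fractions that the reworked crop_image indexes directly. A minimal sketch of the new contract; the crop_person helper, the sample values, and the 0.6 threshold are illustrative, not part of the codebase:

from PIL import Image

def crop_person(image, box):
    # box holds normalized [xmin, ymin, xmax, ymax] fractions,
    # matching the indexing in crop_image above
    width, height = image.size
    left, top = box[0] * width, box[1] * height
    right, bottom = box[2] * width, box[3] * height
    return image.crop((left, top, right, bottom))

# upstream person detections now arrive as plain tuples
prev_inference_result = [('person', 0.97, [0.25, 0.1, 0.75, 0.95])]
image = Image.new('RGB', (640, 480))
for label, confidence, box in prev_inference_result:
    if label == 'person' and confidence >= 0.6:  # illustrative threshold
        person_image = crop_person(image, box)   # region handed to face detection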
diff --git a/src/ambianic/pipeline/ai/fall_detect.py b/src/ambianic/pipeline/ai/fall_detect.py
index cab8df61..bb46c27a 100755
--- a/src/ambianic/pipeline/ai/fall_detect.py
+++ b/src/ambianic/pipeline/ai/fall_detect.py
@@ -67,17 +67,16 @@ def __init__(self,
         self._pose_engine = PoseEngine(self._tfengine, context=self.context)
         self._fall_factor = 60
         self.confidence_threshold = confidence_threshold
-        log.debug(f"Initializing FallDetector with conficence threshold: \
-            {self.confidence_threshold}")
+        log.debug(f"Initializing FallDetector with confidence threshold: {self.confidence_threshold}")

         # Require a minimum amount of time between two video frames in seconds.
-        # Otherwise on high performing hard, the poses could be too close to 
+        # Otherwise on high performing hardware, the poses could be too close to
         # each other and have negligible difference
         # for fall detection purpose.
         self.min_time_between_frames = 1
-        # Require the time distance between two video frames not to exceed 
+        # Require the time distance between two video frames not to exceed
         # a certain limit in seconds.
-        # Otherwise there could be data noise which could lead 
+        # Otherwise there could be data noise which could lead to
         # false positive detections.
         self.max_time_between_frames = 10

@@ -100,8 +99,6 @@ def process_sample(self, **sample):
         try:
             image = sample['image']
             inference_result, thumbnail = self.fall_detect(image=image)
-            inference_result = self.convert_inference_result(
-                inference_result)
             inf_meta = {
                 'display': 'Fall Detection',
             }
@@ -116,8 +113,9 @@ def process_sample(self, **sample):
         except Exception as e:
             log.exception('Error "%s" while processing sample. '
                           'Dropping sample: %s',
-                          str(e),
-                          str(sample))
+                          str(e),
+                          str(sample)
+                          )

     def calculate_angle(self, p):
         '''
@@ -160,60 +158,52 @@ def is_body_line_motion_downward(self, left_angle_with_yaxis,

     def find_keypoints(self, image):

-        # this score value should be related to the configuration \
-        # confidence_threshold parameter
+        # this score value should be related to the configuration confidence_threshold parameter
         min_score = self.confidence_threshold
         rotations = [Image.ROTATE_270, Image.ROTATE_90]
         angle = 0
         pose = None
         poses, thumbnail, _ = self._pose_engine.detect_poses(image)
         width, height = thumbnail.size
-        # if no pose detected with high confidence, 
+        # if no pose detected with high confidence,
         # try rotating the image +/- 90' to find a fallen person
-        # currently only looking at pose[0] because we are focused \
-        # on a lone person falls
-        # while (not poses or poses[0].score < min_score) and rotations:
-        spinal_vector_score, pose_dix = self.estimate_spinal_vector_score(
-            poses[0])
+        # currently only looking at pose[0] because we are focused on lone-person falls
+        # while (not poses or poses[0].score < min_score) and rotations:
+        spinal_vector_score, pose_dix = self.estimate_spinal_vector_score(poses[0])
         while spinal_vector_score < min_score and rotations:
-            angle = rotations.pop()
-            transposed = image.transpose(angle)
-            # we are interested in the poses but not the rotated thumbnail
-            poses, _, _ = self._pose_engine.detect_poses(transposed)
-            spinal_vector_score, pose_dix = self.estimate_spinal_vector_score(
-                poses[0])
+            angle = rotations.pop()
+            transposed = image.transpose(angle)
+            # we are interested in the poses but not the rotated thumbnail
+            poses, _, _ = self._pose_engine.detect_poses(transposed)
+            spinal_vector_score, pose_dix = self.estimate_spinal_vector_score(poses[0])

         if poses and poses[0]:
             pose = poses[0]

         # lets check if we found a good pose candidate

         if (pose and spinal_vector_score >= min_score):
-            # if the image was rotated, we need to rotate back to the original\
-            # image coordinates
+            # if the image was rotated, we need to rotate back to the original image coordinates
             # before comparing with poses in other frames.
             if angle == Image.ROTATE_90:
-                # ROTATE_90 rotates 90' counter clockwise \
-                # from ^ to < orientation.
+                # ROTATE_90 rotates 90' counter clockwise from ^ to < orientation.
                 for _, keypoint in pose.keypoints.items():
                     # keypoint.yx[0] is the x coordinate in an image
-                    # keypoint.yx[0] is the y coordinate in an image, \
-                    # with 0,0 in the upper left corner (not lower left).
+                    # keypoint.yx[1] is the y coordinate in an image, with 0,0 in the upper left corner (not lower left).
                     tmp_swap = keypoint.yx[0]
                     keypoint.yx[0] = width-keypoint.yx[1]
                     keypoint.yx[1] = tmp_swap
-            elif angle == Image.ROTATE_270: 
-                # ROTATE_270 rotates 90' clockwise from ^ to > orientation. 
+            elif angle == Image.ROTATE_270:
+                # ROTATE_270 rotates 90' clockwise from ^ to > orientation.
                 for _, keypoint in pose.keypoints.items():
                     tmp_swap = keypoint.yx[0]
                     keypoint.yx[0] = keypoint.yx[1]
                     keypoint.yx[1] = height-tmp_swap

             # we could not detexct a pose with sufficient confidence
-            log.info(f"""A pose detected with
-                     spinal_vector_score={spinal_vector_score} >= {min_score}
-                     confidence threshold.
-                     Pose keypoints: {pose_dix}"
-                     """)
+            log.info(f"""A pose detected with spinal_vector_score={spinal_vector_score} >= {min_score} confidence threshold.
+                     Pose keypoints: {pose_dix}
+                     """
+                     )
         else:
             pose = None

@@ -221,17 +211,15 @@ def find_keypoints(self, image):

     def find_changes_in_angle(self, pose_dix, inx):
         '''
-        Find the changes in angle for shoulder-hip lines
-        b/w current and previpus frame.
+        Find the changes in angle for shoulder-hip lines between the current and previous frame.
         '''
-        prev_leftLine_corr_exist = all(e in self._prev_data[inx][self.POSE_VAL]
-                                       for e in [self.LEFT_SHOULDER, self.LEFT_HIP])
-        curr_leftLine_corr_exist = all(e in pose_dix for e in [self.LEFT_SHOULDER,self.LEFT_HIP])
+        prev_leftLine_corr_exist = all(e in self._prev_data[inx][self.POSE_VAL] for e in [self.LEFT_SHOULDER, self.LEFT_HIP])
+        curr_leftLine_corr_exist = all(e in pose_dix for e in [self.LEFT_SHOULDER, self.LEFT_HIP])

         prev_rightLine_corr_exist = all(e in self._prev_data[inx][self.POSE_VAL]
                                         for e in [self.RIGHT_SHOULDER, self.RIGHT_HIP])
         curr_rightLine_corr_exist = all(e in pose_dix for e in [self.RIGHT_SHOULDER, self.RIGHT_HIP])
-
+
         left_angle = right_angle = 0

         if prev_leftLine_corr_exist and curr_leftLine_corr_exist:
@@ -241,26 +229,27 @@ def find_changes_in_angle(self, pose_dix, inx):
             left_angle = self.calculate_angle(temp_left_vector)
             log.debug("Left shoulder-hip angle: %r", left_angle)

+
         if prev_rightLine_corr_exist and curr_rightLine_corr_exist:
             temp_right_vector = [[self._prev_data[inx][self.POSE_VAL][self.RIGHT_SHOULDER],
-                                 self._prev_data[inx][self.POSE_VAL][self.RIGHT_HIP]],
-                                 [pose_dix[self.RIGHT_SHOULDER], pose_dix[self.RIGHT_HIP]]]
+                                  self._prev_data[inx][self.POSE_VAL][self.RIGHT_HIP]],
+                                 [pose_dix[self.RIGHT_SHOULDER], pose_dix[self.RIGHT_HIP]]]
             right_angle = self.calculate_angle(temp_right_vector)
             log.debug("Right shoulder-hip angle: %r", right_angle)

+
         angle_change = max(left_angle, right_angle)
         return angle_change

-    def assign_prev_records(self, pose_dix, left_angle_with_yaxis,
-                            rigth_angle_with_yaxis, now, thumbnail,
-                            current_body_vector_score):
+    def assign_prev_records(self, pose_dix, left_angle_with_yaxis, rigth_angle_with_yaxis, now, thumbnail, current_body_vector_score):
         curr_data = {self.POSE_VAL: pose_dix,
-                     self.TIMESTAMP: now,
-                     self.THUMBNAIL: thumbnail,
-                     self.LEFT_ANGLE_WITH_YAXIS: left_angle_with_yaxis,
-                     self.RIGHT_ANGLE_WITH_YAXIS: rigth_angle_with_yaxis,
-                     self.BODY_VECTOR_SCORE: current_body_vector_score}
+                     self.TIMESTAMP: now,
+                     self.THUMBNAIL: thumbnail,
+                     self.LEFT_ANGLE_WITH_YAXIS: left_angle_with_yaxis,
+                     self.RIGHT_ANGLE_WITH_YAXIS: rigth_angle_with_yaxis,
+                     self.BODY_VECTOR_SCORE: current_body_vector_score
+                     }

         self._prev_data[-2] = self._prev_data[-1]
         self._prev_data[-1] = curr_data
@@ -276,14 +265,12 @@ def draw_lines(self, thumbnail, pose_dix, score):
             return body_lines_drawn

         if pose_dix.keys() >= {self.LEFT_SHOULDER, self.LEFT_HIP}:
-            body_line = [tuple(pose_dix[self.LEFT_SHOULDER]),
-                         tuple(pose_dix[self.LEFT_HIP])]
+            body_line = [tuple(pose_dix[self.LEFT_SHOULDER]), tuple(pose_dix[self.LEFT_HIP])]
             draw.line(body_line, fill='red')
             body_lines_drawn += 1

         if pose_dix.keys() >= {self.RIGHT_SHOULDER, self.RIGHT_HIP}:
-            body_line = [tuple(pose_dix[self.RIGHT_SHOULDER]),
-                         tuple(pose_dix[self.RIGHT_HIP])]
+            body_line = [tuple(pose_dix[self.RIGHT_SHOULDER]), tuple(pose_dix[self.RIGHT_HIP])]
             draw.line(body_line, fill='red')
             body_lines_drawn += 1

@@ -301,22 +288,18 @@ def get_line_angles_with_yaxis(self, pose_dix):
         Find the angle b/w shoulder-hip line with yaxis.
         '''
         y_axis_corr = [[0, 0], [0, self._pose_engine._tensor_image_height]]
-
+
         leftLine_corr_exist = all(e in pose_dix for e in [self.LEFT_SHOULDER, self.LEFT_HIP])
         rightLine_corr_exist = all(e in pose_dix for e in [self.RIGHT_SHOULDER, self.RIGHT_HIP])

         l_angle = r_angle = 0

         if leftLine_corr_exist:
-            l_angle = self.calculate_angle([y_axis_corr,
-                                            [pose_dix[self.LEFT_SHOULDER],
-                                             pose_dix[self.LEFT_HIP]]])
-
+            l_angle = self.calculate_angle([y_axis_corr, [pose_dix[self.LEFT_SHOULDER], pose_dix[self.LEFT_HIP]]])
+
         if rightLine_corr_exist:
-            r_angle = self.calculate_angle([y_axis_corr,
-                                            [pose_dix[self.RIGHT_SHOULDER],
-                                             pose_dix[self.RIGHT_HIP]]])
-
+            r_angle = self.calculate_angle([y_axis_corr, [pose_dix[self.RIGHT_SHOULDER], pose_dix[self.RIGHT_HIP]]])
+
         return (l_angle, r_angle)

     def estimate_spinal_vector_score(self, pose):
@@ -324,51 +307,39 @@ def estimate_spinal_vector_score(self, pose):
         is_leftVector = is_rightVector = False

         # Calculate leftVectorScore & rightVectorScore
-        leftVectorScore = min(pose.keypoints[self.LEFT_SHOULDER].score,
-                              pose.keypoints[self.LEFT_HIP].score)
-        rightVectorScore = min(pose.keypoints[self.RIGHT_SHOULDER].score,
-                               pose.keypoints[self.RIGHT_HIP].score)
+        leftVectorScore = min(pose.keypoints[self.LEFT_SHOULDER].score, pose.keypoints[self.LEFT_HIP].score)
+        rightVectorScore = min(pose.keypoints[self.RIGHT_SHOULDER].score, pose.keypoints[self.RIGHT_HIP].score)

         if leftVectorScore > self.confidence_threshold:
             is_leftVector = True
-            pose_dix[self.LEFT_SHOULDER] = \
-                pose.keypoints[self.LEFT_SHOULDER].yx
+            pose_dix[self.LEFT_SHOULDER] = pose.keypoints[self.LEFT_SHOULDER].yx
             pose_dix[self.LEFT_HIP] = pose.keypoints[self.LEFT_HIP].yx

         if rightVectorScore > self.confidence_threshold:
             is_rightVector = True
-            pose_dix[self.RIGHT_SHOULDER] = \
-                pose.keypoints[self.RIGHT_SHOULDER].yx
+            pose_dix[self.RIGHT_SHOULDER] = pose.keypoints[self.RIGHT_SHOULDER].yx
             pose_dix[self.RIGHT_HIP] = pose.keypoints[self.RIGHT_HIP].yx

         def find_spinalLine():
-            left_spinal_x1 = (pose_dix[self.LEFT_SHOULDER][0] +
-                              pose_dix[self.RIGHT_SHOULDER][0]) / 2
-            left_spinal_y1 = (pose_dix[self.LEFT_SHOULDER][1] +
-                              pose_dix[self.RIGHT_SHOULDER][1]) / 2
+            left_spinal_x1 = (pose_dix[self.LEFT_SHOULDER][0] + pose_dix[self.RIGHT_SHOULDER][0]) / 2
+            left_spinal_y1 = (pose_dix[self.LEFT_SHOULDER][1] + pose_dix[self.RIGHT_SHOULDER][1]) / 2
+
+            right_spinal_x1 = (pose_dix[self.LEFT_HIP][0] + pose_dix[self.RIGHT_HIP][0]) / 2
+            right_spinal_y1 = (pose_dix[self.LEFT_HIP][1] + pose_dix[self.RIGHT_HIP][1]) / 2

-            right_spinal_x1 = (pose_dix[self.LEFT_HIP][0] +
-                               pose_dix[self.RIGHT_HIP][0]) / 2
-            right_spinal_y1 = (pose_dix[self.LEFT_HIP][1] +
-                               pose_dix[self.RIGHT_HIP][1]) / 2
+            return (left_spinal_x1, left_spinal_y1), (right_spinal_x1, right_spinal_y1)

-            return (left_spinal_x1, left_spinal_y1), \
-                (right_spinal_x1, right_spinal_y1)

         if is_leftVector and is_rightVector:
             spinalVectorEstimate = find_spinalLine()
             spinalVectorScore = (leftVectorScore + rightVectorScore) / 2.0
         elif is_leftVector:
-            spinalVectorEstimate = pose_dix[self.LEFT_SHOULDER], \
-                pose_dix[self.LEFT_HIP]
-            # 10% score penalty in conficence as only \
-            # left shoulder-hip line is detected
+            spinalVectorEstimate = pose_dix[self.LEFT_SHOULDER], pose_dix[self.LEFT_HIP]
+            # 10% score penalty in confidence as only the left shoulder-hip line is detected
             spinalVectorScore = leftVectorScore * 0.9
         elif is_rightVector:
-            spinalVectorEstimate = pose_dix[self.RIGHT_SHOULDER], \
-                pose_dix[self.RIGHT_HIP]
-            # 10% score penalty in conficence as only \
-            # right shoulder-hip line is detected
+            spinalVectorEstimate = pose_dix[self.RIGHT_SHOULDER], pose_dix[self.RIGHT_HIP]
+            # 10% score penalty in confidence as only the right shoulder-hip line is detected
             spinalVectorScore = rightVectorScore * 0.9
         else:
             spinalVectorScore = 0
@@ -384,115 +355,61 @@ def fall_detect(self, image=None):
         now = time.monotonic()
         lapse = now - self._prev_data[-1][self.TIMESTAMP]

-        if self._prev_data[-1][self.POSE_VAL] \
-                and lapse < self.min_time_between_frames:
-            log.debug("Received an image frame too soon after the previous \
-                      frame. Only %.2f ms apart.\
-                      Minimum %.2f ms distance required for fall detection.",
-                      lapse, self.min_time_between_frames)
+        if self._prev_data[-1][self.POSE_VAL] and lapse < self.min_time_between_frames:
+            log.debug("Received an image frame too soon after the previous frame. Only %.2f seconds apart.\
+                      Minimum %.2f seconds distance required for fall detection.", lapse, self.min_time_between_frames)
             inference_result = None
             thumbnail = self._prev_data[-1][self.THUMBNAIL]
         else:
             # Detection using tensorflow posenet module
-            pose, thumbnail, spinal_vector_score, pose_dix = \
-                self.find_keypoints(image)
+            pose, thumbnail, spinal_vector_score, pose_dix = self.find_keypoints(image)

             inference_result = None
             if not pose:
-                log.debug(f"No pose detected or detection score does not meet \
-                          confidence threshold of {self.confidence_threshold}.")
+                log.debug(f"No pose detected or detection score does not meet confidence threshold of {self.confidence_threshold}.")
             else:
                 inference_result = []
                 current_body_vector_score = spinal_vector_score

                 # Find line angle with vertcal axis
-                left_angle_with_yaxis, rigth_angle_with_yaxis = \
-                    self.get_line_angles_with_yaxis(pose_dix)
+                left_angle_with_yaxis, rigth_angle_with_yaxis = self.get_line_angles_with_yaxis(pose_dix)

                 # save an image with drawn lines for debugging
-                if log.getEffectiveLevel() <= logging.DEBUG:
-                    # development mode
+                if log.getEffectiveLevel() <= logging.DEBUG:  # development mode
                     self.draw_lines(thumbnail, pose_dix, spinal_vector_score)

                 for t in [-1, -2]:
                     lapse = now - self._prev_data[t][self.TIMESTAMP]
-                    if not self._prev_data[t][self.POSE_VAL] or \
-                       lapse > self.max_time_between_frames:
-                        log.debug("No recent pose to compare to. Will save \
-                                  this frame pose for subsequent comparison.")
-                    elif not self.is_body_line_motion_downward(
-                            left_angle_with_yaxis,
-                            rigth_angle_with_yaxis,
-                            inx=t):
-                        log.debug("The body-line angle with vertical axis is \
-                                  decreasing from the previous frame. \
-                                  Not likely to be a fall.")
+                    if not self._prev_data[t][self.POSE_VAL] or lapse > self.max_time_between_frames:
+                        log.debug("No recent pose to compare to. Will save this frame pose for subsequent comparison.")
+                    elif not self.is_body_line_motion_downward(left_angle_with_yaxis, rigth_angle_with_yaxis, inx=t):
+                        log.debug("The body-line angle with vertical axis is decreasing from the previous frame. Not likely to be a fall.")
                     else:
-                        leaning_angle = self.find_changes_in_angle(pose_dix,
-                                                                   inx=t)
+                        leaning_angle = self.find_changes_in_angle(pose_dix, inx=t)

-                        # Get leaning_probability by comparing leaning_angle
-                        # with fall_factor probability.
-                        leaning_probability = 1 \
-                            if leaning_angle > self._fall_factor else 0
+                        # Get leaning_probability by comparing leaning_angle with fall_factor probability.
+                        leaning_probability = 1 if leaning_angle > self._fall_factor else 0

-                        # Calculate fall score using average of current and \
-                        # previous frame's body vector score with \
-                        # leaning_probability
-                        fall_score = leaning_probability * \
-                            (self._prev_data[t][self.BODY_VECTOR_SCORE] +
-                             current_body_vector_score) / 2
+                        # Calculate fall score using average of current and previous frame's body vector score with leaning_probability
+                        fall_score = leaning_probability * (self._prev_data[t][self.BODY_VECTOR_SCORE] + current_body_vector_score) / 2

                         if fall_score >= self.confidence_threshold:
-                            inference_result.append(('FALL', fall_score,
-                                                     leaning_angle, pose_dix))
+                            # insert a box that covers the whole image as a workaround
+                            # to meet the expected format of the save_detections element
+                            box = [0, 0, 1, 1]
+                            inference_result.append(('FALL', fall_score, box, leaning_angle))
                             log.info("Fall detected: %r", inference_result)
                             break
                         else:
-                            log.debug(f"No fall detected due to low \
-                                      confidence score: \
-                                      {fall_score} < {self.confidence_threshold} \
-                                      min threshold.Inference result: {inference_result}")
+                            log.debug(f"No fall detected due to low confidence score: {fall_score} < {self.confidence_threshold} min threshold. Inference result: {inference_result}")

                 log.debug("Saving pose for subsequent comparison.")
-                self.assign_prev_records(pose_dix, left_angle_with_yaxis,
-                                         rigth_angle_with_yaxis, now,
-                                         thumbnail,
-                                         current_body_vector_score)
+                self.assign_prev_records(pose_dix, left_angle_with_yaxis, rigth_angle_with_yaxis, now, thumbnail, current_body_vector_score)

                 # log.debug("Logging stats")

         self.log_stats(start_time=start_time)
-            log.debug("thumbnail: %r", thumbnail)
+        log.debug("thumbnail: %r", thumbnail)

         return inference_result, thumbnail
-
-    def convert_inference_result(self, inference_result):
-        inf_json = []
-
-        if inference_result:
-            for inf in inference_result:
-                label, confidence, leaning_angle, keypoint_corr = inf
-                log.info('label: %s , confidence: %.0f, leaning_angle: %.0f, \
-                         keypoint_corr: %s',
-                         label,
-                         confidence,
-                         leaning_angle,
-                         keypoint_corr)
-                one_inf = {
-                    'label': label,
-                    'confidence': float(confidence),
-                    'leaning_angle': float(leaning_angle),
-                    'keypoint_corr': {
-                        'left shoulder': keypoint_corr.get('left shoulder',
-                                                           None),
-                        'left hip': keypoint_corr.get('left hip', None),
-                        'right shoulder': keypoint_corr.get('right shoulder',
-                                                            None),
-                        'right hip': keypoint_corr.get('right hip', None)
-                    }
-                }
-                inf_json.append(one_inf)
-
-        return inf_json
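Read together, the fall_detect() hunks define the new scoring path: a change in shoulder-hip line angle above the 60-degree fall factor gates the score, the score averages the current and previous frames' spinal vector scores, and the detection tuple now carries a whole-image box in third position. A small numeric sketch of that arithmetic; all values below are made up for illustration:

# illustrative walk-through of the fall-score logic above
fall_factor = 60            # degrees; minimum angle change to consider a fall
confidence_threshold = 0.6  # assumed pipeline setting

leaning_angle = 72               # shoulder-hip angle change between two frames
prev_body_vector_score = 0.85    # spinal vector score of the previous frame
current_body_vector_score = 0.75

leaning_probability = 1 if leaning_angle > fall_factor else 0
fall_score = leaning_probability * (prev_body_vector_score + current_body_vector_score) / 2
# fall_score == 0.8 here, which clears the 0.6 threshold

if fall_score >= confidence_threshold:
    # matches the new tuple format: (label, confidence, box, leaning_angle)
    detection = ('FALL', fall_score, [0, 0, 1, 1], leaning_angle)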
diff --git a/src/ambianic/pipeline/ai/image_boundingBox_detection.py b/src/ambianic/pipeline/ai/image_boundingBox_detection.py
index eb5fdfc8..906c6d93 100644
--- a/src/ambianic/pipeline/ai/image_boundingBox_detection.py
+++ b/src/ambianic/pipeline/ai/image_boundingBox_detection.py
@@ -28,6 +28,7 @@ def __init__(self,

         super().__init__(model, **kwargs)

+
     def detect(self, image=None):
         """Detect objects in image.

@@ -57,8 +58,7 @@ def detect(self, image=None):

         desired_size = (width, height)

-        new_im, thumbnail = self.resize_to_input_tensor(image=image,
-                                                        desired_size=desired_size)
+        new_im, thumbnail = self.resize_to_input_tensor(image=image, desired_size=desired_size)

         # calculate what fraction of the new image is the thumbnail size
         # we will use these factors to adjust detection box coordinates
@@ -142,13 +142,13 @@ def detect(self, image=None):
             x1 = min(box[3] / w_factor, 1)
             y1 = min(box[2] / h_factor, 1)
             log.debug('thumbnail image size: %r , '
-                      'tensor image size: %r',
-                      thumbnail.size,
-                      new_im.size)
+                      'tensor image size: %r',
+                      thumbnail.size,
+                      new_im.size)
             log.debug('resizing detection box (x0, y0, x1, y1) '
-                      'from: %r to %r',
-                      (box[1], box[0], box[3], box[2]),
-                      (x0, y0, x1, y1))
+                      'from: %r to %r',
+                      (box[1], box[0], box[3], box[2]),
+                      (x0, y0, x1, y1))
             inference_result.append((
                 label,
                 confidence,
diff --git a/src/ambianic/pipeline/ai/object_detect.py b/src/ambianic/pipeline/ai/object_detect.py
index 6c6e213b..3f5ceaca 100755
--- a/src/ambianic/pipeline/ai/object_detect.py
+++ b/src/ambianic/pipeline/ai/object_detect.py
@@ -20,9 +20,6 @@ def process_sample(self, **sample):
             image = sample['image']
             thumbnail, tensor_image, inference_result = \
                 self.detect(image=image)
-
-            inference_result = self.convert_inference_result(
-                inference_result)
             log.debug('Object detection inference_result: %r',
                       inference_result)
             inf_meta = {
@@ -43,27 +40,3 @@ def process_sample(self, **sample):
                 str(sample)
             )
             log.warning(stacktrace())
-
-    def convert_inference_result(self, inference_result):
-
-        inf_json = []
-        if inference_result:
-            for inf in inference_result:
-                label, confidence, box = inf[0:3]
-                log.info('label: %s , confidence: %.0f, box: %s',
-                         label,
-                         confidence,
-                         box)
-                one_inf = {
-                    'label': label,
-                    'confidence': float(confidence),
-                    'box': {
-                        'xmin': float(box[0]),
-                        'ymin': float(box[1]),
-                        'xmax': float(box[2]),
-                        'ymax': float(box[3]),
-                    }
-                }
-                inf_json.append(one_inf)
-
-        return inf_json
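For orientation, the detect() hunk in image_boundingBox_detection.py maps boxes from the model's padded square input back to thumbnail-relative fractions, with the model emitting boxes in (ymin, xmin, ymax, xmax) order. A sketch of that coordinate math; the sample numbers are illustrative:

# sketch of the box rescaling in detect() above (sample numbers illustrative)
tensor_box = (0.1, 0.2, 0.5, 0.6)  # model output: (ymin, xmin, ymax, xmax)
# w_factor/h_factor: the thumbnail's fraction of the padded tensor image,
# e.g. full width but only 75% of the padded height
w_factor, h_factor = 1.0, 0.75

x0 = min(tensor_box[1] / w_factor, 1)
y0 = min(tensor_box[0] / h_factor, 1)
x1 = min(tensor_box[3] / w_factor, 1)
y1 = min(tensor_box[2] / h_factor, 1)
# -> (x0, y0, x1, y1) == (0.2, 0.1333..., 0.6, 0.6666...), clamped to [0, 1],
# which is the normalized box the downstream elements now consume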
diff --git a/src/ambianic/pipeline/ai/pose_engine.py b/src/ambianic/pipeline/ai/pose_engine.py
index 694f9041..d231f06c 100644
--- a/src/ambianic/pipeline/ai/pose_engine.py
+++ b/src/ambianic/pipeline/ai/pose_engine.py
@@ -28,7 +28,6 @@
     'right ankle'
 )

-
 class Keypoint:
     __slots__ = ['k', 'yx', 'score']

@@ -67,26 +66,23 @@ def __init__(self, tfengine=None, context=None):
         self._tfengine = tfengine

         self._input_tensor_shape = self.get_input_tensor_shape()
-        _, self._tensor_image_height, self._tensor_image_width, self._tensor_image_depth = \
-            self.get_input_tensor_shape()
-
+        _, self._tensor_image_height, self._tensor_image_width, self._tensor_image_depth = \
+            self.get_input_tensor_shape()
         self.confidence_threshold = self._tfengine.confidence_threshold
-        log.debug(f"Initializing PoseEngine with confidence threshold \
-            {self.confidence_threshold}")
+        log.debug(f"Initializing PoseEngine with confidence threshold {self.confidence_threshold}")
+

     def get_input_tensor_shape(self):
         """Get the shape of the input tensor structure.

         Gets the shape required for the input tensor.
-        For models trained for image classification / detection, the shape is
-        always [1, height, width, channels].
-        To be used as input for :func:`run_inference`,
-        this tensor shape must be flattened into a 1-D array with size
-        ``height * width * channels``. To instead get that 1-D array size, use
+        For models trained for image classification / detection, the shape is always
+        [1, height, width, channels]. To be used as input for :func:`run_inference`,
+        this tensor shape must be flattened into a 1-D array with size ``height *
+        width * channels``. To instead get that 1-D array size, use
         :func:`required_input_array_size`.

         Returns:
-            A 1-D array (:obj:`numpy.ndarray`) representing the required input
-            tensor shape.
+            A 1-D array (:obj:`numpy.ndarray`) representing the required input tensor
+            shape.
         """
         return self._tfengine.input_details[0]['shape']

@@ -96,24 +92,20 @@ def parse_output(self, heatmap_data, offset_data, threshold):

         for i in range(heatmap_data.shape[-1]):

-            joint_heatmap = heatmap_data[..., i]
-            max_val_pos = np.squeeze(
-                np.argwhere(joint_heatmap == np.max(joint_heatmap)))
-            remap_pos = np.array(max_val_pos/8*self._tensor_image_height,
-                                 dtype=np.int32)
-            pose_kps[i, 0] = int(remap_pos[0] + offset_data[max_val_pos[0],
-                                 max_val_pos[1], i])
-            pose_kps[i, 1] = int(remap_pos[1] + offset_data[max_val_pos[0],
-                                 max_val_pos[1], i+joint_num])
+            joint_heatmap = heatmap_data[...,i]
+            max_val_pos = np.squeeze(np.argwhere(joint_heatmap == np.max(joint_heatmap)))
+            remap_pos = np.array(max_val_pos/8*self._tensor_image_height, dtype=np.int32)
+            pose_kps[i, 0] = int(remap_pos[0] + offset_data[max_val_pos[0], max_val_pos[1], i])
+            pose_kps[i, 1] = int(remap_pos[1] + offset_data[max_val_pos[0], max_val_pos[1], i+joint_num])
             max_prob = np.max(joint_heatmap)
             pose_kps[i, 3] = max_prob
             if max_prob > threshold:
-                if pose_kps[i, 0] < self._tensor_image_height and \
-                   pose_kps[i, 1] < self._tensor_image_width:
+                if pose_kps[i, 0] < self._tensor_image_height and pose_kps[i, 1] < self._tensor_image_width:
                     pose_kps[i, 2] = 1

         return pose_kps

+
     def sigmoid(self, x):
         return 1 / (1 + np.exp(-x))

@@ -137,39 +129,31 @@ def detect_poses(self, img):
             Resized image fitting the AI model input tensor.
         """

-        _tensor_input_size = (self._tensor_image_width,
-                              self._tensor_image_height)
+        _tensor_input_size = (self._tensor_image_width, self._tensor_image_height)

         # thumbnail is a proportionately resized image
-        thumbnail = TFDetectionModel.thumbnail(image=img,
-                                               desired_size=_tensor_input_size)
+        thumbnail = TFDetectionModel.thumbnail(image=img, desired_size=_tensor_input_size)

         # convert thumbnail into an image with the exact size
-        # as the input tensor preserving proportions by padding with
-        # a solid color as needed
-        template_image = TFDetectionModel.resize(image=thumbnail,
-                                                 desired_size=_tensor_input_size)
-
+        # as the input tensor preserving proportions by padding with a solid color as needed
+        template_image = TFDetectionModel.resize(image=thumbnail, desired_size=_tensor_input_size)
+
         template_input = np.expand_dims(template_image.copy(), axis=0)
         floating_model = self._tfengine.input_details[0]['dtype'] == np.float32

         if floating_model:
             template_input = (np.float32(template_input) - 127.5) / 127.5

-        self.tf_interpreter().\
-            set_tensor(self._tfengine.input_details[0]['index'],
-                       template_input)
+        self.tf_interpreter().set_tensor(self._tfengine.input_details[0]['index'], template_input)
         self.tf_interpreter().invoke()

-        template_output_data = self.tf_interpreter().\
-            get_tensor(self._tfengine.output_details[0]['index'])
-        template_offset_data = self.tf_interpreter().\
-            get_tensor(self._tfengine.output_details[1]['index'])
+        template_output_data = self.tf_interpreter().get_tensor(self._tfengine.output_details[0]['index'])
+        template_offset_data = self.tf_interpreter().get_tensor(self._tfengine.output_details[1]['index'])

         template_heatmaps = np.squeeze(template_output_data)
         template_offsets = np.squeeze(template_offset_data)
-
+
         kps = self.parse_output(template_heatmaps, template_offsets, 0.3)
-
+
         poses = []

         keypoint_dict = {}
@@ -179,30 +163,25 @@ def detect_poses(self, img):
         for point_i in range(keypoint_count):
             x, y = kps[point_i, 1], kps[point_i, 0]
             prob = self.sigmoid(kps[point_i, 3])
-
+
             if prob > self.confidence_threshold:
                 cnt += 1
-                if log.getEffectiveLevel() <= logging.DEBUG:
-                    # development mode
-                    # draw on image and save it for debugging
+                if log.getEffectiveLevel() <= logging.DEBUG:  # development mode
+                    # draw on image and save it for debugging
                     draw = ImageDraw.Draw(template_image)
-                    draw.line(((0, 0), (x, y)), fill='blue')
+                    draw.line(((0,0), (x, y)), fill='blue')

-            keypoint = Keypoint(KEYPOINTS[point_i], [x, y], prob)
+            keypoint = Keypoint(KEYPOINTS[point_i], [x, y], prob)
             keypoint_dict[KEYPOINTS[point_i]] = keypoint

-        # overall pose score is calculated as the average of all
-        # individual keypoint scores
+        # overall pose score is calculated as the average of all individual keypoint scores
         pose_score = cnt/keypoint_count
         log.debug(f"Overall pose score (keypoint score average): {pose_score}")
         poses.append(Pose(keypoint_dict, pose_score))

-        if cnt > 0 and log.getEffectiveLevel() <= logging.DEBUG:
-            # development mode
+        if cnt > 0 and log.getEffectiveLevel() <= logging.DEBUG:  # development mode
             # save template_image for debugging
             timestr = int(time.monotonic()*1000)
-            log.debug(f"Detected a pose with {cnt} keypoints that score over \
-                      the minimum confidence threshold of \
-                      {self.confidence_threshold}.")
+            log.debug(f"Detected a pose with {cnt} keypoints that score over the minimum confidence threshold of {self.confidence_threshold}.")
             debug_image_file_name = \
                 f'tmp-pose-detect-image-time-{timestr}-keypoints-{cnt}.jpg'
             template_image.save(
diff --git a/src/ambianic/pipeline/store.py b/src/ambianic/pipeline/store.py
index ca8dd872..a5cd43c1 100755
--- a/src/ambianic/pipeline/store.py
+++ b/src/ambianic/pipeline/store.py
@@ -81,7 +81,6 @@ def _save_sample(self,
                      thumbnail=None,
                      inference_result=None,
                      inference_meta=None):
-
         time_prefix = inf_time.strftime("%Y%m%d-%H%M%S.%f%z-{suffix}.{fext}")
         image_file = time_prefix.format(suffix='image', fext='jpg')
         image_path = self._output_directory / image_file
@@ -89,7 +88,25 @@ def _save_sample(self,
         thumbnail_path = self._output_directory / thumbnail_file
         json_file = time_prefix.format(suffix='inference', fext='json')
         json_path = self._output_directory / json_file
-
+        inf_json = []
+        if inference_result:
+            for inf in inference_result:
+                label, confidence, box = inf[0:3]
+                log.info('label: %s , confidence: %.0f, box: %s',
+                         label,
+                         confidence,
+                         box)
+                one_inf = {
+                    'label': label,
+                    'confidence': float(confidence),
+                    'box': {
+                        'xmin': float(box[0]),
+                        'ymin': float(box[1]),
+                        'xmax': float(box[2]),
+                        'ymax': float(box[3]),
+                    }
+                }
+                inf_json.append(one_inf)
         save_json = {
             'id': uuid.uuid4().hex,
             'datetime': inf_time.isoformat(),
@@ -100,7 +117,7 @@ def _save_sample(self,
             # this will be important when resloving REST API data
             # file serving
             'rel_dir': self._rel_data_dir,
-            'inference_result': inference_result,
+            'inference_result': inf_json,
             'inference_meta': inference_meta
         }
         image.save(image_path)
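Since the per-element convert_inference_result helpers were deleted, _save_sample is now the single place where detection tuples become JSON-serializable dicts. A short usage sketch of that conversion; the detection values are illustrative:

# detection tuples as they arrive from the AI pipe elements
inference_result = [('person', 0.98, (0.1, 0.2, 0.6, 0.9))]

inf_json = []
for label, confidence, box in inference_result:
    inf_json.append({
        'label': label,
        'confidence': float(confidence),
        'box': {
            'xmin': float(box[0]),
            'ymin': float(box[1]),
            'xmax': float(box[2]),
            'ymax': float(box[3]),
        },
    })
# inf_json is what lands in the 'inference_result' field of the saved JSON file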
diff --git a/tests/pipeline/ai/test_face_detect.py b/tests/pipeline/ai/test_face_detect.py
index f2afeeea..bd680f74 100644
--- a/tests/pipeline/ai/test_face_detect.py
+++ b/tests/pipeline/ai/test_face_detect.py
@@ -133,26 +133,17 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     # bad sample
     face_detector.receive_next_sample(
         image=None,
-        inference_result=[{'label': 'person', 'confidence': 1,
-                           'box': {'xmin': -1, 'ymin': -2,
-                                   'xmax': -3, 'ymax': -4}}]
+        inference_result=[('person', 1, [-1, -2, -3, -4]), ]
     )
     # good sample
     img = _get_image(file_name='person-face.jpg')
     face_detector.receive_next_sample(
         image=img,
-        inference_result=[{'label': 'person', 'confidence': 1,
-                           'box': {'xmin': 0, 'ymin': 0,
-                                   'xmax': 1, 'ymax': 1}}]
+        inference_result=[('person', 1, [0, 0, 1, 1]), ]
     )
     assert result
     assert len(result) == 1
-
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
-
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.8
     assert x0 > 0 and x0 < x1
@@ -192,12 +183,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
-
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -228,10 +214,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.8
     assert x0 > 0 and x0 < x1
@@ -244,10 +227,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.8
     assert x0 > 0 and x0 < x1
@@ -271,10 +251,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.8
     assert x0 > 0 and x0 < x1
@@ -306,10 +283,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -342,10 +316,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -370,18 +341,12 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 2
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
     assert y0 > 0 and y0 < y1
-    label = result[1]['label']
-    confidence = result[1]['confidence']
-    (x0, y0) = result[1]['box']['xmin'], result[1]['box']['ymin']
-    (x1, y1) = result[1]['box']['xmax'], result[1]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = result[1]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -394,10 +359,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.6
     assert x0 > 0 and x0 < x1
@@ -448,16 +410,11 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     img = _get_image(file_name='person-face.jpg')
     face_detector.receive_next_sample(
         image=img,
-        inference_result=[{'label': 'person', 'confidence': 1,
-                           'box': {'xmin': 0, 'ymin': 0,
-                                   'xmax': 1, 'ymax': 1}}]
+        inference_result=[('person', 1, [0, 0, 1, 1]), ]
     )
     assert result
     assert len(result) == 1
-    label = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = result[0]
     assert label == 'person'
     assert confidence > 0.8
     assert x0 > 0 and x0 < x1
diff --git a/tests/pipeline/ai/test_fall_detect.py b/tests/pipeline/ai/test_fall_detect.py
index 554293da..728f6f41 100755
--- a/tests/pipeline/ai/test_fall_detect.py
+++ b/tests/pipeline/ai/test_fall_detect.py
@@ -67,13 +67,11 @@ def test_model_inputs():


 def test_fall_detection_thumbnail_present():
-    """Expected to receive thumnail in result if image is provided \
-        and poses are detected."""
+    """Expected to receive thumbnail in result if image is provided and poses are detected."""
     config = _fall_detect_config()
     result = None

-    def sample_callback(image=None, thumbnail=None, inference_result=None,
-                        **kwargs):
+    def sample_callback(image=None, thumbnail=None, inference_result=None, **kwargs):
         nonlocal result
         result = image is not None and thumbnail is not None and \
             inference_result is not None
@@ -112,11 +110,9 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     fall_detector.receive_next_sample(image=img_2)

     assert not result
-
+

 def test_fall_detection_case_2_1():
-    """Expected to not detect a fall even though key-points are detected
-       and the angle criteria is met. However the time distance between
-       frames is too short."""
+    """Expected to not detect a fall even though key-points are detected and the angle criterion is met. However, the time distance between frames is too short."""
     config = _fall_detect_config()
     result = None
@@ -140,20 +136,15 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     fall_detector.receive_next_sample(image=img_1)
     end_time = time.monotonic()
     safe_min = end_time-start_time+1
-    # set min time to a sufficiently big number to ensure test passes
-    # on slow environments
-    # the goal is to simulate two frames that are too close in time
-    # to be considered for a fall detection sequence
+    # set min time to a sufficiently big number to ensure test passes on slow environments
+    # the goal is to simulate two frames that are too close in time to be considered for a fall detection sequence
     fall_detector.min_time_between_frames = safe_min
     fall_detector.receive_next_sample(image=img_2)

-    assert not result
-
+    assert result is None

 def test_fall_detection_case_2_2():
-    """Expected to detect a fall because key-points are detected,
-       the angle criteria is met and the time distance between
-       frames is not too short."""
+    """Expected to detect a fall because key-points are detected, the angle criterion is met, and the time distance between frames is not too short."""
     config = _fall_detect_config()
     result = None
@@ -164,7 +155,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     fall_detector = FallDetector(**config)
     output = _OutPipeElement(sample_callback=sample_callback)
-
+
     fall_detector.connect_to_next_element(output)

     # The frame represents a person who is in a standing position.
@@ -180,20 +171,14 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     assert result
     assert len(result) == 1
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    angle = result[0]['leaning_angle']
-    keypoint_corr = result[0]['keypoint_corr']
-
-    assert keypoint_corr
+    category, confidence, box, angle = result[0]
+    assert box  # assert to avoid an unused-variable warning
     assert category == 'FALL'
     assert confidence > 0.7
     assert angle > 60

-
 def test_fall_detection_case_3_1():
-    """Expect to detect a fall as key-points are detected by
-       rotating the image clockwise."""
+    """Expect to detect a fall as key-points are detected by rotating the image clockwise."""
     config = _fall_detect_config()
     result = None
@@ -204,7 +189,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     fall_detector = FallDetector(**config)
     output = _OutPipeElement(sample_callback=sample_callback)
-
+
     fall_detector.connect_to_next_element(output)

     # The frame represents a person who is in a standing position.
@@ -221,21 +206,15 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    angle = result[0]['leaning_angle']
-    keypoint_corr = result[0]['keypoint_corr']
-
-    assert keypoint_corr
+    category, confidence, box, angle = result[0]
+    assert box  # assert to avoid an unused-variable warning
     assert category == 'FALL'
     assert confidence > 0.3
     assert angle > 60


 def test_fall_detection_case_3_2():
-    """Expect to detect a fall as key-points are detected
-       by rotating the image counter clockwise."""
+    """Expect to detect a fall as key-points are detected by rotating the image counter clockwise."""
     config = _fall_detect_config()
     result = None
@@ -263,18 +242,12 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    angle = result[0]['leaning_angle']
-    keypoint_corr = result[0]['keypoint_corr']
-
-    assert keypoint_corr
+    category, confidence, box, angle = result[0]
+    assert box  # assert to avoid an unused-variable warning
     assert category == 'FALL'
     assert confidence > 0.3
     assert angle > 60

-
 def test_fall_detection_case_4():
     """No Fall"""
     config = _fall_detect_config()
@@ -287,7 +260,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     fall_detector = FallDetector(**config)
     output = _OutPipeElement(sample_callback=sample_callback)
-
+
     fall_detector.connect_to_next_element(output)

     # The frame represents a person who is in a standing position.
@@ -305,8 +278,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):


 def test_fall_detection_case_5():
-    """Expected to not detect a fall even the angle criteria is met
-       because image 2 is standing up rather than fall"""
+    """Expected to not detect a fall even though the angle criterion is met, because image 2 shows standing up rather than a fall."""
     config = _fall_detect_config()
     result = None
@@ -335,8 +307,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):


 def test_fall_detection_case_6():
-    """Expect to not detect a fall as in 1st image key-points are detected
-       but not in 2nd"""
+    """Expect to not detect a fall as key-points are detected in the 1st image but not in the 2nd."""
     config = _fall_detect_config()
     result = None
@@ -364,7 +335,6 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     assert not result

-
 def test_fall_detection_case_7():
     """Expect to not detect a fall"""
     config = _fall_detect_config()
@@ -394,7 +364,6 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     assert not result

-
 def test_fall_detection_case_8():
     """Expect to not detect a fall"""
     config = _fall_detect_config()
@@ -430,11 +399,9 @@ def test_background_image():
     config = _fall_detect_config()
     result = None

-    def sample_callback(image=None, thumbnail=None, inference_result=None,
-                        **kwargs):
+    def sample_callback(image=None, thumbnail=None, inference_result=None, **kwargs):
         nonlocal result
-        result = image is not None and thumbnail is not None and \
-            not inference_result
+        result = image is not None and thumbnail is not None and inference_result is None
     fall_detector = FallDetector(**config)
     output = _OutPipeElement(sample_callback=sample_callback)
     fall_detector.connect_to_next_element(output)
@@ -494,13 +461,8 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    angle = result[0]['leaning_angle']
-    keypoint_corr = result[0]['keypoint_corr']
-
-    assert keypoint_corr
+    category, confidence, box, angle = result[0]
+    assert box  # assert to avoid an unused-variable warning
     assert category == 'FALL'
     assert confidence > 0.7
     assert angle > 60
@@ -529,12 +491,10 @@ def test_draw_line_1():
     fall_detector = FallDetector(**config)

     image = _get_image(file_name='fall_img_1.png')
-    pose_dix = {fall_detector.LEFT_SHOULDER: [0, 0],
-                fall_detector.LEFT_HIP: [0, 1]}
+    pose_dix = { fall_detector.LEFT_SHOULDER: [0,0], fall_detector.LEFT_HIP: [0,1]}
     lines_drawn = fall_detector.draw_lines(image, pose_dix, 0.5)
     assert lines_drawn == 1

-
 def test_draw_line_1_1():
     """One keypoing but no full body line. No image should be saved."""
     config = _fall_detect_config()
@@ -542,11 +502,10 @@ def test_draw_line_1_1():
     fall_detector = FallDetector(**config)

     image = _get_image(file_name='fall_img_1.png')
-    pose_dix = {fall_detector.LEFT_SHOULDER: [0, 0]}
+    pose_dix = { fall_detector.LEFT_SHOULDER: [0,0]}
     lines_drawn = fall_detector.draw_lines(image, pose_dix, 0.5)
     assert lines_drawn == 0

-
 def test_draw_line_2():
     """Two body lines passed to draw. Image with two lines should be saved."""
     config = _fall_detect_config()

     fall_detector = FallDetector(**config)

     # The frame represents a person who is in a standing position.
     image = _get_image(file_name='fall_img_1.png')
-    pose_dix = {fall_detector.LEFT_SHOULDER: [0, 0],
-                fall_detector.LEFT_HIP: [0, 1],
-                fall_detector.RIGHT_SHOULDER: [1, 0],
-                fall_detector.RIGHT_HIP: [1, 1]}
+    pose_dix = { fall_detector.LEFT_SHOULDER: [0,0], fall_detector.LEFT_HIP: [0,1], fall_detector.RIGHT_SHOULDER: [1,0], fall_detector.RIGHT_HIP: [1,1]}
     lines_drawn = fall_detector.draw_lines(image, pose_dix, 0.5)
     assert lines_drawn == 2

@@ -571,6 +527,7 @@ def test_fall_detection_2_frame_back_case_1():
         frame[t]   : A person is fall down.
     """
+
     config = _fall_detect_config()
     result = None

@@ -586,8 +543,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     # A frame at t-2 timestamp when person is in standing position.
     img_1 = _get_image(file_name='fall_img_1.png')
-    # A frame at t-1 timestamp when person is almost in standing position \
-    # as he is walking.
+    # A frame at t-1 timestamp when person is almost in standing position as he is walking.
     img_2 = _get_image(file_name='fall_img_1_1.png')

     # A frame at t timestamp when person falls down.
@@ -607,13 +563,8 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    angle = result[0]['leaning_angle']
-    keypoint_corr = result[0]['keypoint_corr']
-
-    assert keypoint_corr
+    category, confidence, box, angle = result[0]
+    assert box  # assert to avoid an unused-variable warning
     assert category == 'FALL'
     assert confidence > 0.7
     assert angle > 60
@@ -662,13 +613,8 @@ def sample_callback(image=None, inference_result=None, **kwargs):

     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    angle = result[0]['leaning_angle']
-    keypoint_corr = result[0]['keypoint_corr']
-
-    assert keypoint_corr
+    category, confidence, box, angle = result[0]
+    assert box  # assert to avoid an unused-variable warning
     assert category == 'FALL'
     assert confidence > 0.7
     assert angle > 60

@@ -676,7 +622,7 @@ def test_fall_detection_2_frame_back_case_3():
     """
-    Expected to not detect a fall using frame[t],frame[t-1] and frame[t-2].
+    Expected to not detect a fall using frame[t], frame[t-1] and frame[t-2].
     frame[t-2] : A person is in walking postion.
     frame[t-1] : A person is in walking postion.
     frame[t]   : A person is slight in lean postion but no fall.
diff --git a/tests/pipeline/ai/test_fall_detect_more.py b/tests/pipeline/ai/test_fall_detect_more.py
index 4e9a57ee..e3ee8f7f 100755
--- a/tests/pipeline/ai/test_fall_detect_more.py
+++ b/tests/pipeline/ai/test_fall_detect_more.py
@@ -11,7 +11,7 @@
 _data_dir.mkdir(parents=True, exist_ok=True)


-def test_model_inputs(): 
+def test_model_inputs():
     """Verify against known model inputs."""
     config = _fall_detect_config()
     fall_detector = FallDetector(**config)
@@ -28,8 +28,7 @@ def test_config_confidence_threshold():
-    """Verify against known confidence threshold. Make sure it propagates
-       at all levels."""
+    """Verify against known confidence threshold. Make sure it propagates at all levels."""
     config = _fall_detect_config()
     fall_detector = FallDetector(**config)
     tfe = fall_detector._tfengine
@@ -57,8 +56,7 @@ def _helper_test_debug_image_save(context: PipelineContext = None):
     config = _fall_detect_config()
     result = None

-    def sample_callback(image=None, thumbnail=None, inference_result=None,
-                        **kwargs):
+    def sample_callback(image=None, thumbnail=None, inference_result=None, **kwargs):
         nonlocal result
         result = image is not None and thumbnail is not None and \
             inference_result is not None
diff --git a/tests/pipeline/ai/test_object_detect.py b/tests/pipeline/ai/test_object_detect.py
index 021d6e7b..16e357b2 100644
--- a/tests/pipeline/ai/test_object_detect.py
+++ b/tests/pipeline/ai/test_object_detect.py
@@ -112,12 +112,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
-
+    category, confidence, (x0, y0, x1, y1) = result[0]
     assert category == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -140,12 +135,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
-
+    category, confidence, (x0, y0, x1, y1) = result[0]
     assert category == 'person'
     assert confidence > 0.8
     assert x0 > 0 and x0 < x1
@@ -186,12 +176,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
-
+    category, confidence, (x0, y0, x1, y1) = result[0]
     assert category == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -214,12 +199,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
-
+    category, confidence, (x0, y0, x1, y1) = result[0]
     assert category == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -245,12 +225,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 1
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
-
+    category, confidence, (x0, y0, x1, y1) = result[0]
     assert category == 'person'
     assert confidence > confidence_threshold
     assert x0 > 0 and x0 < x1
@@ -274,22 +249,12 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 2
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
-
+    category, confidence, (x0, y0, x1, y1) = result[0]
     assert category == 'person'
     assert confidence > 0.7
     assert x0 > 0 and x0 < x1
     assert y0 > 0 and y0 < y1
-
-    category = result[1]['label']
-    confidence = result[1]['confidence']
-    (x0, y0) = result[1]['box']['xmin'], result[1]['box']['ymin']
-    (x1, y1) = result[1]['box']['xmax'], result[1]['box']['ymax']
-
+    category, confidence, (x0, y0, x1, y1) = result[1]
     assert category == 'couch'
     assert confidence > 0.6
     assert x0 > 0 and x0 < x1
@@ -314,22 +279,12 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     object_detector.receive_next_sample(image=img)
     assert result
     assert len(result) == 2
-
-    category = result[0]['label']
-    confidence = result[0]['confidence']
-    (x0, y0) = result[0]['box']['xmin'], result[0]['box']['ymin']
-    (x1, y1) = result[0]['box']['xmax'], result[0]['box']['ymax']
-
+    category, confidence, (x0, y0, x1, y1) = result[0]
     assert category == 'person'
     assert confidence > 0.7
     assert x0 > 0 and x0 < x1
     assert y0 > 0 and y0 < y1
-
-    category = result[1]['label']
-    confidence = result[1]['confidence']
-    (x0, y0) = result[1]['box']['xmin'], result[1]['box']['ymin']
-    (x1, y1) = result[1]['box']['xmax'], result[1]['box']['ymax']
-
+    category, confidence, (x0, y0, x1, y1) = result[1]
     assert category == 'couch'
     assert confidence > 0.6
     assert x0 > 0 and x0 < x1
diff --git a/tests/pipeline/avsource/test_avsource.py b/tests/pipeline/avsource/test_avsource.py
index a77be9d5..e2044979 100644
--- a/tests/pipeline/avsource/test_avsource.py
+++ b/tests/pipeline/avsource/test_avsource.py
@@ -18,6 +18,7 @@
 log.setLevel(logging.DEBUG)

+
 class _TestAVSourceElement(AVSourceElement):

     def __init__(self, **source_conf):
@@ -146,8 +147,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
         print('detections: {det}'.format(det=detections))
         print('len(detections): {len}'.format(len=len(detections)))
         if detections:
-            label = detections[0]['label']
-            confidence = detections[0]['confidence']
+            label, confidence, _ = detections[0]
             if label == 'person' and confidence > 0.9:
                 # skip video image samples until we reach a person detection
                 # with high level of confidence
@@ -167,12 +167,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     assert sample_image.size[1] == 720
     assert detections
     assert len(detections) == 1
-
-    label = detections[0]['label']
-    confidence = detections[0]['confidence']
-    (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin']
-    (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax']
-
+    label, confidence, (x0, y0, x1, y1) = detections[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -247,8 +242,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
         print('detections: {det}'.format(det=detections))
         print('len(detections): {len}'.format(len=len(detections)))
         if detections:
-            label = detections[0]['label']
-            confidence = detections[0]['confidence']
+            label, confidence, _ = detections[0]
             if label == 'person' and confidence > 0.9:
                 # skip video image samples until we reach a person detection
                 # with high level of confidence
@@ -268,12 +262,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     assert sample_image.size[1] == 720
     assert detections
     assert len(detections) == 1
-
-    label = detections[0]['label']
-    confidence = detections[0]['confidence']
(x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin'] - (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax'] - + label, confidence, (x0, y0, x1, y1) = detections[0] assert label == 'person' assert confidence > 0.9 assert x0 > 0 and x0 < x1 @@ -307,8 +296,7 @@ def sample_callback(image=None, inference_result=None, **kwargs): print('detections: {det}'.format(det=detections)) print('len(detections): {len}'.format(len=len(detections))) if detections: - label = detections[0]['label'] - confidence = detections[0]['confidence'] + label, confidence, _ = detections[0] if label == 'person' and confidence > 0.9: # skip video image samples until we reach a person detection # with high level of confidence @@ -328,10 +316,7 @@ def sample_callback(image=None, inference_result=None, **kwargs): assert sample_image.size[1] == 720 assert detections assert len(detections) == 1 - label = detections[0]['label'] - confidence = detections[0]['confidence'] - (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin'] - (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax'] + label, confidence, (x0, y0, x1, y1) = detections[0] assert label == 'person' assert confidence > 0.9 assert x0 > 0 and x0 < x1 @@ -375,8 +360,7 @@ def sample_callback(image=None, inference_result=None, **kwargs): print('detections: {det}'.format(det=detections)) print('len(detections): {len}'.format(len=len(detections))) if detections: - label = detections[0]['label'] - confidence = detections[0]['confidence'] + label, confidence, _ = detections[0] if label == 'person' and confidence > 0.9: # skip video image samples until we reach a person detection # with high level of confidence @@ -396,10 +380,7 @@ def sample_callback(image=None, inference_result=None, **kwargs): assert sample_image.size[1] == 720 assert detections assert len(detections) == 1 - label = detections[0]['label'] - confidence = detections[0]['confidence'] - (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin'] - (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax'] + label, confidence, (x0, y0, x1, y1) = detections[0] assert label == 'person' assert confidence > 0.9 assert x0 > 0 and x0 < x1 @@ -514,8 +495,7 @@ def sample_callback(image=None, inference_result=None, **kwargs): print('detections: {det}'.format(det=detections)) print('len(detections): {len}'.format(len=len(detections))) if detections: - label = detections[0]['label'] - confidence = detections[0]['confidence'] + label, confidence, _ = detections[0] if label == 'person' and confidence > 0.9: # skip video image samples until we reach a person detection # with high level of confidence @@ -535,10 +515,7 @@ def sample_callback(image=None, inference_result=None, **kwargs): assert sample_image.size[1] == 720 assert detections assert len(detections) == 1 - label = detections[0]['label'] - confidence = detections[0]['confidence'] - (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin'] - (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax'] + label, confidence, (x0, y0, x1, y1) = detections[0] assert label == 'person' assert confidence > 0.9 assert x0 > 0 and x0 < x1 @@ -619,8 +596,7 @@ def sample_callback(image=None, inference_result=None, **kwargs): print('detections: {det}'.format(det=detections)) print('len(detections): {len}'.format(len=len(detections))) if detections: - label = detections[0]['label'] - confidence = detections[0]['confidence'] + label, confidence, _ = detections[0] if label == 
                 # skip video image samples until we reach a person detection
                 # with high level of confidence
@@ -640,10 +616,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     assert sample_image.size[1] == 720
     assert detections
     assert len(detections) == 1
-    label = detections[0]['label']
-    confidence = detections[0]['confidence']
-    (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin']
-    (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = detections[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -739,8 +712,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
         print('detections: {det}'.format(det=detections))
         print('len(detections): {len}'.format(len=len(detections)))
         if detections:
-            label = detections[0]['label']
-            confidence = detections[0]['confidence']
+            label, confidence, _ = detections[0]
             if label == 'person' and confidence > 0.9:
                 # skip video image samples until we reach a person detection
                 # with high level of confidence
@@ -760,10 +732,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     assert sample_image.size[1] == 720
     assert detections
     assert len(detections) == 1
-    label = detections[0]['label']
-    confidence = detections[0]['confidence']
-    (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin']
-    (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = detections[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
diff --git a/tests/pipeline/avsource/test_avsource_http.py b/tests/pipeline/avsource/test_avsource_http.py
index 9d0bc61d..c730ed7e 100644
--- a/tests/pipeline/avsource/test_avsource_http.py
+++ b/tests/pipeline/avsource/test_avsource_http.py
@@ -47,7 +47,6 @@ def _run_http_fetch(self, url=None, continuous=False):
         self._run_http_fetch_called = True
         super()._run_http_fetch(url=url, continuous=continuous)
 
-
 class _OutPipeElement(PipeElement):
 
     def __init__(self, sample_callback=None):
@@ -77,8 +76,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
         print('detections: {det}'.format(det=detections))
         print('len(detections): {len}'.format(len=len(detections)))
         if detections:
-            label = detections[0]['label']
-            confidence = detections[0]['confidence']
+            label, confidence, _ = detections[0]
             if label == 'person' and confidence > 0.9:
                 # skip video image samples until we reach a person detection
                 # with high level of confidence
@@ -100,10 +98,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     assert sample_image.size[1] == 720
     assert detections
     assert len(detections) == 1
-    label = detections[0]['label']
-    confidence = detections[0]['confidence']
-    (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin']
-    (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = detections[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -130,8 +125,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
         print('detections: {det}'.format(det=detections))
         print('len(detections): {len}'.format(len=len(detections)))
         if detections:
-            label = detections[0]['label']
-            confidence = detections[0]['confidence']
+            label, confidence, _ = detections[0]
             if label == 'person' and confidence > 0.9:
                 # skip video image samples until we reach a person detection
                 # with high level of confidence
@@ -151,10 +145,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     assert sample_image.size[1] == 720
     assert detections
     assert len(detections) == 1
-    label = detections[0]['label']
-    confidence = detections[0]['confidence']
-    (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin']
-    (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = detections[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
@@ -211,8 +202,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
         print('detections: {det}'.format(det=detections))
         print('len(detections): {len}'.format(len=len(detections)))
         if detections:
-            label = detections[0]['label']
-            confidence = detections[0]['confidence']
+            label, confidence, _ = detections[0]
             if label == 'person' and confidence > 0.9:
                 # skip video image samples until we reach a person detection
                 # with high level of confidence
@@ -232,10 +222,7 @@ def sample_callback(image=None, inference_result=None, **kwargs):
     assert sample_image.size[1] == 720
     assert detections
     assert len(detections) == 1
-    label = detections[0]['label']
-    confidence = detections[0]['confidence']
-    (x0, y0) = detections[0]['box']['xmin'], detections[0]['box']['ymin']
-    (x1, y1) = detections[0]['box']['xmax'], detections[0]['box']['ymax']
+    label, confidence, (x0, y0, x1, y1) = detections[0]
     assert label == 'person'
     assert confidence > 0.9
     assert x0 > 0 and x0 < x1
diff --git a/tests/pipeline/test_notify.py b/tests/pipeline/test_notify.py
index b77fd5e4..6e06137b 100644
--- a/tests/pipeline/test_notify.py
+++ b/tests/pipeline/test_notify.py
@@ -102,20 +102,7 @@ def test_notification_with_attachments():
         notify=notify,
     )
     img = Image.new('RGB', (60, 30), color='red')
-
-    detections = [
-        {
-            'label': 'person',
-            'confidence': 0.98,
-            'box': {
-                'xmin': 0,
-                'ymin': 1,
-                'xmax': 2,
-                'ymax': 3
-            }
-        }
-    ]
-
+    detections = [('person', 0.98, (0, 1, 2, 3))]
     processed_samples = list(store.process_sample(image=img,
                                                   thumbnail=img,
                                                   inference_result=detections))
@@ -152,20 +139,7 @@ def test_plain_notification():
         notify=notify,
     )
     img = Image.new('RGB', (60, 30), color='red')
-
-    detections = [
-        {
-            'label': 'person',
-            'confidence': 0.98,
-            'box': {
-                'xmin': 0,
-                'ymin': 1,
-                'xmax': 2,
-                'ymax': 3
-            }
-        }
-    ]
-
+    detections = [('person', 0.98, (0, 1, 2, 3))]
     processed_samples = list(store.process_sample(image=img,
                                                   thumbnail=img,
                                                   inference_result=detections))
diff --git a/tests/pipeline/test_store.py b/tests/pipeline/test_store.py
index 88159c0f..7a2b635c 100644
--- a/tests/pipeline/test_store.py
+++ b/tests/pipeline/test_store.py
@@ -54,20 +54,10 @@ def test_store_positive_detection():
     store = _TestSaveDetectionSamples(context=context,
                                       event_log=logging.getLogger())
     img = Image.new('RGB', (60, 30), color='red')
-    detections = [
-        {
-            'label': 'person',
-            'confidence': 0.98,
-            'box': {
-                'xmin': 0,
-                'ymin': 1,
-                'xmax': 2,
-                'ymax': 3
-            }
-        }
-    ]
-
+    detections = [
+        ('person', 0.98, (0, 1, 2, 3))
+    ]
     processed_samples = list(store.process_sample(image=img,
                                                   thumbnail=img,
                                                   inference_result=detections))
@@ -77,15 +67,10 @@ def test_store_positive_detection():
     assert img_out == img
     inf = processed_samples[0]['inference_result']
     print(inf)
-
-    category = inf[0]['label']
-    confidence = inf[0]['confidence']
-    (x0, y0) = inf[0]['box']['xmin'], inf[0]['box']['ymin']
-    (x1, y1) = inf[0]['box']['xmax'], inf[0]['box']['ymax']
-
+    category, confidence, box = inf[0]
     assert category == 'person'
     assert confidence == 0.98
-    assert x0 == 0 and y0 == 1 and x1 == 2 and y1 == 3
+    assert box[0] == 0 and box[1] == 1 and box[2] == 2 and box[3] == 3
     assert store._save_sample_called
     assert store._inf_result == detections
     assert store._img_path
@@ -171,9 +156,7 @@ def test_store_negative_detection():
 
 
 def test_store_negative_detection_no_inference():
-    """
-    Expect store to save the image from an inference without any detection.
-    """
+    """Expect store to save the image from an inference without any detection."""
     out_dir = os.path.dirname(os.path.abspath(__file__))
     out_dir = os.path.join(
         out_dir,
@@ -244,20 +227,10 @@ def test_process_sample_exception():
     store = _TestSaveDetectionSamples2(context=context,
                                        event_log=logging.getLogger())
     img = Image.new('RGB', (60, 30), color='red')
-    detections = [
-        {
-            'label': 'person',
-            'confidence': 0.98,
-            'box': {
-                'xmin': 0,
-                'ymin': 1,
-                'xmax': 2,
-                'ymax': 3
-            }
-        }
-    ]
-
+    detections = [
+        ('person', 0.98, (0, 1, 2, 3))
+    ]
     processed_samples = list(store.process_sample(image=img,
                                                   inference_result=detections,
                                                   inference_meta=None))
@@ -268,12 +241,7 @@ def test_process_sample_exception():
     assert img_out == img
     inf = processed_samples[0]['inference_result']
     print(inf)
-
-    category = inf[0]['label']
-    confidence = inf[0]['confidence']
-    (x0, y0) = inf[0]['box']['xmin'], inf[0]['box']['ymin']
-    (x1, y1) = inf[0]['box']['xmax'], inf[0]['box']['ymax']
-
+    category, confidence, box = inf[0]
    assert category == 'person'
     assert confidence == 0.98
-    assert x0 == 0 and y0 == 1 and x1 == 2 and y1 == 3
+    assert box[0] == 0 and box[1] == 1 and box[2] == 2 and box[3] == 3
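
Reviewer note (not part of the patch): every hunk above migrates consumers of
inference_result from the legacy dict entries to plain tuples. Below is a
minimal sketch of the convention the updated tests assume. dict_to_tuple is a
hypothetical helper named here for illustration only; it mirrors the
convert_inference_result method this changeset removes from face_detect.py.
Per crop_image in face_detect.py the box coordinates appear to be normalized
0..1 floats; the (0, 1, 2, 3) values in the store/notify tests are just
placeholder data.

# Legacy entry:  {'label': 'person', 'confidence': 0.98,
#                 'box': {'xmin': 0, 'ymin': 1, 'xmax': 2, 'ymax': 3}}
# New entry:     ('person', 0.98, (xmin, ymin, xmax, ymax))


def dict_to_tuple(det):
    """Convert a legacy dict-style detection to the new tuple form."""
    box = det['box']
    return (det['label'],
            float(det['confidence']),
            (box['xmin'], box['ymin'], box['xmax'], box['ymax']))


# Unpacking then works exactly the way the updated tests do it:
label, confidence, (x0, y0, x1, y1) = dict_to_tuple({
    'label': 'person',
    'confidence': 0.98,
    'box': {'xmin': 0.1, 'ymin': 0.2, 'xmax': 0.3, 'ymax': 0.4},
})
assert label == 'person' and confidence == 0.98
assert x0 < x1 and y0 < y1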