Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Different models per camera #148

Merged
merged 26 commits into from
Apr 13, 2021
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
23da089
First endpoints tests (Routers: App and Config)
JFer11 Dec 20, 2020
919363c
First endpoints tests (Routers: App and Config) II
JFer11 Dec 21, 2020
6681c8d
Changes based on comments
JFer11 Dec 23, 2020
dd4155a
First Tests, Refactored
JFer11 Jan 14, 2021
a67a7b5
First endpoints tests (Routers: App and Config) III
JFer11 Jan 21, 2021
da1c868
Merge branch 'tests_api_processor_routers'
JFer11 Jan 25, 2021
bcd8bfd
Merge branch 'master' of github.com:neuralet/smart-social-distancing
JFer11 Jan 29, 2021
a355d86
Merge branch 'master' of github.com:xmartlabs/smart-social-distancing
JFer11 Feb 15, 2021
2bc1b56
Merge branch 'master' of github.com:xmartlabs/smart-social-distancing
JFer11 Mar 2, 2021
06da81d
Merge branch 'master' of github.com:xmartlabs/smart-social-distancing
JFer11 Mar 10, 2021
313baaf
Merge branch 'master' of github.com:xmartlabs/smart-social-distancing
JFer11 Mar 15, 2021
0114e8f
Merge branch 'master' of github.com:xmartlabs/smart-social-distancing
JFer11 Mar 18, 2021
d398543
POC Support different models
JFer11 Mar 19, 2021
c77bcf4
Changes based on comments
JFer11 Mar 23, 2021
02dc9c5
Progress in modify model endpoint
JFer11 Mar 24, 2021
09ca19d
progress in validation
JFer11 Mar 25, 2021
f39e512
progress whole endpoint
JFer11 Mar 29, 2021
b5fa4c3
tests for the endpoint and a couple of fixes
JFer11 Mar 30, 2021
08647cc
README updated
JFer11 Mar 30, 2021
6f419dd
progress based on comments
JFer11 Mar 31, 2021
9250a45
progress based on comments II
JFer11 Mar 31, 2021
8292c38
progress based on comments III
JFer11 Apr 1, 2021
cbf01e7
progress based on comments IV
JFer11 Apr 2, 2021
934b5c2
progress based on comments V
JFer11 Apr 5, 2021
d364a69
little change
JFer11 Apr 7, 2021
3004dbc
mathi comments
JFer11 Apr 12, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 7 additions & 15 deletions api/models/ml_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,7 @@
"ssd_mobilenet_v2_pedestrian_softbio", "posenet", "pedestrian_ssd_mobilenet_v2",
"pedestrian_ssdlite_mobilenet_v2"], # All available models.
"x86": ["mobilenet_ssd_v2", "openvino", "openpifpaf", "openpifpaf_tensorrt", "yolov3"],
"x86-gpu": ["mobilenet_ssd_v2", "openvino", "openpifpaf", "openpifpaf_tensorrt", "yolov3"],

"x86-gpu": ["mobilenet_ssd_v2", "openvino", "openpifpaf", "openpifpaf_tensorrt", "yolov3"]
}


Expand Down Expand Up @@ -60,36 +59,29 @@ def check_models_and_device(cls, values):
if values.get("device") == "Jetson":
if values.get("name") not in MODELS_DEVICES["Jetson"]:
raise ValueError(f'The device {values.get("device")} only supports the following models:'
f'"ssd_mobilenet_v2_coco", "ssd_mobilenet_v2_pedestrian_softbio",'
f'"openpifpaf_tensorrt". ')
f' {MODELS_DEVICES["Jetson"]}. ')

elif values.get("device") == "EdgeTPU":
if values.get("name") not in MODELS_DEVICES["EdgeTPU"]:
raise ValueError(f'The device {values.get("device")} only supports the following models:'
f'"mobilenet_ssd_v2", "pedestrian_ssd_mobilenet_v2","pedestrian_ssdlite_mobilenet_v2",'
f'"posenet". ')
f' {MODELS_DEVICES["EdgeTPU"]}. ')

elif values.get("device") == "Dummy":
# No restrictions on this model.
# All available models.
if values.get("name") not in MODELS_DEVICES["Dummy"]:
raise ValueError('The device {values.get("device")} only supports the following models: '
'"openvino", "openpifpaf_tensorrt",'
'"mobilenet_ssd_v2", "openpifpaf", "yolov3",'
'"ssd_mobilenet_v2_coco", "ssd_mobilenet_v2_pedestrian_softbio", "posenet",'
'"pedestrian_ssd_mobilenet_v2", "pedestrian_ssdlite_mobilenet_v2".')
raise ValueError(f'The device {values.get("device")} only supports the following models:'
f' {MODELS_DEVICES["Dummy"]}. ')

elif values.get("device") == "x86":
if values.get("name") not in MODELS_DEVICES["x86"]:
raise ValueError(f'The device {values.get("device")} only supports the following models:'
f'"mobilenet_ssd_v2", "openvino","openpifpaf","openpifpaf_tensorrt"'
f'"yolov3". ')
f' {MODELS_DEVICES["x86"]}. ')

elif values.get("device") == "x86-gpu":
if values.get("name") not in MODELS_DEVICES["x86-gpu"]:
raise ValueError(f'The device {values.get("device")} only supports the following models:'
f'"mobilenet_ssd_v2", "openvino","openpifpaf","openpifpaf_tensorrt"'
f'"yolov3". ')
f' {MODELS_DEVICES["x86-gpu"]}. ')

return values

Expand Down
1 change: 1 addition & 0 deletions api/routers/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,7 @@ async def update_config_file(config: ConfigDTO, reboot_processor: Optional[bool]

@config_router.get("/info", response_model=ConfigInfo)
async def get_processor_info():
# Here we have to specify the camera right? Or are we asking for the default values?
JFer11 marked this conversation as resolved.
Show resolved Hide resolved
"""
Returns basic info regarding this processor
"""
Expand Down
4 changes: 2 additions & 2 deletions libs/classifiers/classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,13 @@ class Classifier:
input image in order to get the classifier results.
:param config: Is a ConfigEngine instance which provides necessary parameters.
"""
def __init__(self, config):
def __init__(self, config, source):
self.config = config
self.classifier = None
classifier_section = self.config.get_section_dict("Classifier")
if classifier_section["Device"] == "Jetson":
from .jetson.classifier import Classifier as JetClassifier
self.classifier = JetClassifier(self.config)
self.classifier = JetClassifier(self.config, source)
elif classifier_section["Device"] == "EdgeTPU":
from .edgetpu.classifier import Classifier as EdgeClassifier
self.classifier = EdgeClassifier(self.config)
Expand Down
4 changes: 2 additions & 2 deletions libs/classifiers/jetson/classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,13 @@ class Classifier:

:param config: Is a ConfigEngine instance which provides necessary parameters.
"""
def __init__(self, config):
def __init__(self, config, source):
self.config = config
self.name = self.config.get_section_dict('Classifier')['Name']

if self.name == 'OFMClassifier':
from libs.classifiers.jetson import face_mask
self.net = face_mask.Classifier(self.config)
self.net = face_mask.Classifier(self.config, source)
else:
raise ValueError('Not supported network named: ', self.name)

Expand Down
61 changes: 37 additions & 24 deletions libs/classifiers/jetson/face_mask.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,23 @@
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import sys
import sys
import time
import logging
import os
from libs.detectors.utils.fps_calculator import convert_infr_time_to_fps
from libs.detectors.utils.ml_model_functions import get_model_json_file_or_return_default_values

# logging.getLogger().setLevel(logging.INFO)
logger = logging.getLogger(__name__)
JFer11 marked this conversation as resolved.
Show resolved Hide resolved

logging.getLogger().setLevel(logging.INFO)

def allocate_buffers(engine):
host_inputs = []
cuda_inputs = []
host_outputs = []
host_outputs = []
cuda_outputs = []
bindings = []
bindings = []
for i in range(engine.num_bindings):
binding = engine[i]
size = trt.volume(engine.get_binding_shape(binding)) * \
Expand All @@ -31,32 +34,42 @@ def allocate_buffers(engine):
stream = cuda.Stream() # create a CUDA stream to run inference
return bindings, host_inputs, cuda_inputs, host_outputs, cuda_outputs, stream


class Classifier():

"""
Perform image classification with the given model. The model is a tensorrt file
which if the classifier can not find it at the path it will generate it
from provided ONNX file.
:param config: Is a ConfigEngine instance which provides necessary parameters.
"""

def _load_engine(self):
TRTbinPath = self.trt_bin_path
if not os.path.exists(TRTbinPath):
os.system('bash /repo/generate_tensorrt.bash config-jetson.ini 1')
with open(TRTbinPath, 'rb') as f, trt.Runtime(self.trt_logger) as runtime:
return runtime.deserialize_cuda_engine(f.read())

def __init__(self, config):
def __init__(self, config, source):
"""Initialize TensorRT plugins, engine and context."""
self.model = ''
self.fps = None
self.config = config
self.trt_logger = trt.Logger(trt.Logger.INFO)
self.trt_bin_path = self.config.get_section_dict('Classifier')['ModelPath']
self.model_input_size = [int(i) for i in self.config.get_section_dict('Detector')['ImageSize'].split(',')] # TODO: Have a look
self.model_input_size = [int(i) for i in
get_model_json_file_or_return_default_values(
self.config,
self.config.get_section_dict("Detector")["Device"],
self.config.get_section_dict(source)["Id"]
)["variables"]['ImageSize'].split(',')
] # TODO: Double-check this image-size lookup. Fer
logger.info("-----------------------------------------")
logger.info(source)
logger.info(self.model_input_size)
logger.info("-----------------------------------------")
self.device = None # enter your Gpu id here
self.cuda_context = None
self.cuda_context = None
self._init_cuda_stuff()

def _init_cuda_stuff(self):
Expand All @@ -71,7 +84,8 @@ def _init_cuda_stuff(self):
self.host_outputs = host_outputs
self.cuda_inputs = cuda_inputs
self.cuda_outputs = cuda_outputs
self.stream = stream
self.stream = stream

def __del__(self):
""" Free CUDA memories. """

Expand All @@ -80,7 +94,6 @@ def __del__(self):
del self.engine_context
del self.engine


def inference(self, resized_rgb_images):
"""
Inference function sets input tensor to input image and gets the output.
Expand All @@ -96,40 +109,40 @@ def inference(self, resized_rgb_images):
host_outputs = self.host_outputs
cuda_inputs = self.cuda_inputs
cuda_outputs = self.cuda_outputs
stream = self.stream
stream = self.stream
t_begin = time.perf_counter()
result = []
scores = []
for img in resized_rgb_images:
for img in resized_rgb_images:
img = np.expand_dims(img, axis=0)
img = img.astype(np.float32)

host_inputs[0] = np.ravel(np.zeros_like(img))
self.cuda_context.push()

self.cuda_context.push()

np.copyto(host_inputs[0], img.ravel())
cuda.memcpy_htod_async(
cuda_inputs[0], host_inputs[0], stream)

self.engine_context.execute_async(
batch_size=1,
bindings=bindings,
stream_handle=stream.handle)

cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
stream.synchronize()
output_dict = host_outputs[0]
pred = list(np.argmax(host_outputs, axis=1))

# TODO: optimized without for
for i, itm in enumerate(host_outputs):
scores.append(itm[pred[i]])
scores.append(itm[pred[i]])

result.append(pred[0])
self.cuda_context.pop()
inference_time = float(time.perf_counter() - t_begin)
if len(resized_rgb_images) != 0 :
inference_time = float(time.perf_counter() - t_begin)
if len(resized_rgb_images) != 0:
inference_time = inference_time / len(resized_rgb_images)
self.fps = convert_infr_time_to_fps(inference_time)
self.fps = convert_infr_time_to_fps(inference_time)
return result, scores
2 changes: 1 addition & 1 deletion libs/cv_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def __init__(self, config, source):
self.classifier = None

if "Classifier" in self.config.get_sections():
self.classifier = Classifier(self.config)
self.classifier = Classifier(self.config, source)

# Init post processors
self.post_processors = []
Expand Down
7 changes: 3 additions & 4 deletions libs/detectors/detector.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,9 @@ def __init__(self, config, source):
self.config = config
self.device = self.config.get_section_dict("Detector")["Device"]
self.resolution = tuple([int(i) for i in self.config.get_section_dict("App")["Resolution"].split(",")])
self.image_size = [int(i) for i in get_model_json_file_or_return_default_values(self.config, self.device, self.config.get_section_dict(source)["Id"])["variables"]["ImageSize"].split(",")]
logger.info("-----------------------------------------")
logger.info(source)
logger.info("-----------------------------------------")
self.image_size = [int(i) for i in get_model_json_file_or_return_default_values(
self.config, self.device, self.config.get_section_dict(source)["Id"])["variables"]["ImageSize"].split(",")]
# TODO: Review the per-camera model image-size lookup above. Fer
self.has_classifier = "Classifier" in self.config.get_sections()
if self.has_classifier:
self.classifier_img_size = [
Expand Down
12 changes: 9 additions & 3 deletions libs/loggers/source_loggers/video_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import os
import shutil

from libs.detectors.utils.ml_model_functions import get_model_json_file_or_return_default_values
from libs.utils import visualization_utils
from libs.source_post_processors.objects_filtering import ObjectsFilteringPostProcessor

Expand Down Expand Up @@ -94,11 +95,16 @@ def update(self, cv_image, objects, post_processing_data, fps):
dist_threshold = post_processing_data.get("dist_threshold", 0)

birds_eye_window = np.zeros(self.birds_eye_resolution[::-1] + (3,), dtype="uint8")
class_id = int(self.config.get_section_dict('Detector')['ClassID']) # TODO: Have a look.

class_id = int(
get_model_json_file_or_return_default_values(
self.config,
self.config.get_section_dict('Detector')['Device'],
self.camera_id
)["variables"]["ClassID"]
) # TODO: have a look. Fer
roi_contour = ObjectsFilteringPostProcessor.get_roi_contour(self.roi_file_path)
if roi_contour is not None:
color = (41, 127, 255) # #ff7f29 (255, 127, 41)
color = (41, 127, 255) # #ff7f29 (255, 127, 41)
visualization_utils.draw_contour(cv_image, roi_contour, color)

output_dict = visualization_utils.visualization_preparation(objects, distancings, dist_threshold)
Expand Down