From 5472d9036896b8d36f8e3942798b7e333138741e Mon Sep 17 00:00:00 2001
From: Koushik Dutta
Date: Sun, 26 Mar 2023 19:21:22 -0700
Subject: [PATCH] opencv: beta

---
 plugins/opencv/package-lock.json | 4 +-
 plugins/opencv/package.json | 2 +-
 plugins/opencv/src/requirements.txt | 1 +
 .../tensorflow-lite/src/predict/__init__.py | 90 +------------------
 plugins/tensorflow-lite/src/requirements.txt | 6 --
 .../tensorflow-lite/src/tflite/__init__.py | 3 +
 6 files changed, 8 insertions(+), 98 deletions(-)

diff --git a/plugins/opencv/package-lock.json b/plugins/opencv/package-lock.json
index 1e33a97ed2..63c17cc712 100644
--- a/plugins/opencv/package-lock.json
+++ b/plugins/opencv/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/opencv",
-  "version": "0.0.69",
+  "version": "0.0.70",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/opencv",
-      "version": "0.0.69",
+      "version": "0.0.70",
       "devDependencies": {
         "@scrypted/sdk": "file:../../sdk"
       }
diff --git a/plugins/opencv/package.json b/plugins/opencv/package.json
index 33a8e67238..79da397225 100644
--- a/plugins/opencv/package.json
+++ b/plugins/opencv/package.json
@@ -36,5 +36,5 @@
   "devDependencies": {
     "@scrypted/sdk": "file:../../sdk"
   },
-  "version": "0.0.69"
+  "version": "0.0.70"
 }
diff --git a/plugins/opencv/src/requirements.txt b/plugins/opencv/src/requirements.txt
index 172b88aa29..6ef7f28bd5 100644
--- a/plugins/opencv/src/requirements.txt
+++ b/plugins/opencv/src/requirements.txt
@@ -9,3 +9,4 @@ imutils>=0.5.0
 av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
 # not available on armhf
 opencv-python; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
+
diff --git a/plugins/tensorflow-lite/src/predict/__init__.py b/plugins/tensorflow-lite/src/predict/__init__.py
index c292bcf5af..3224c4b82f 100644
--- a/plugins/tensorflow-lite/src/predict/__init__.py
+++ b/plugins/tensorflow-lite/src/predict/__init__.py
@@ -13,7 +13,6 @@
 from detect import DetectionSession, DetectPlugin
-from .sort_oh import tracker

 import numpy as np
 import traceback
@@ -24,14 +23,12 @@ class PredictSession(DetectionSession):
     image: Image.Image
-    tracker: sort_oh.tracker.Sort_OH

     def __init__(self, start_time: float) -> None:
         super().__init__()
         self.image = None
         self.processed = 0
         self.start_time = start_time
-        self.tracker = None

 def parse_label_contents(contents: str):
     lines = contents.splitlines()
@@ -121,7 +118,6 @@ def __init__(self, PLUGIN_MIME_TYPE: str, nativeId: str | None = None):
         self.toMimeType = scrypted_sdk.ScryptedMimeTypes.MediaObject.value
         self.crop = False
-        self.trackers: Mapping[str, tracker.Sort_OH] = {}

         # periodic restart because there seems to be leaks in tflite or coral API.
         loop = asyncio.get_event_loop()
@@ -210,23 +206,7 @@ def getModelSettings(self, settings: Any = None) -> list[Setting]:
             ],
         }

-        trackerWindow: Setting = {
-            'title': 'Tracker Window',
-            'subgroup': 'Advanced',
-            'description': 'Internal Setting. Do not change.',
-            'key': 'trackerWindow',
-            'value': 3,
-            'type': 'number',
-        }
-        trackerCertainty: Setting = {
-            'title': 'Tracker Certainty',
-            'subgroup': 'Advanced',
-            'description': 'Internal Setting. Do not change.',
-            'key': 'trackerCertainty',
-            'value': .2,
-            'type': 'number',
-        }
-        return [allowList, trackerWindow, trackerCertainty]
+        return [allowList]

     def create_detection_result(self, objs: List[Prediction], size, allowList, convert_to_src_size=None) -> ObjectsDetected:
         detections: List[ObjectDetectionResult] = []
@@ -400,24 +380,6 @@ async def run_detection_image(self, detection_session: PredictSession, image: Im
         (w, h) = self.get_input_size() or image.size
         (iw, ih) = image.size

-        if detection_session and not detection_session.tracker:
-            t = self.trackers.get(detection_session.id)
-            if not t:
-                t = tracker.Sort_OH(scene=np.array([iw, ih]))
-                trackerCertainty = settings.get('trackerCertainty')
-                if not isinstance(trackerCertainty, int):
-                    trackerCertainty = .2
-                t.conf_three_frame_certainty = trackerCertainty * 3
-                trackerWindow = settings.get('trackerWindow')
-                if not isinstance(trackerWindow, int):
-                    trackerWindow = 3
-                t.conf_unmatched_history_size = trackerWindow
-                self.trackers[detection_session.id] = t
-            detection_session.tracker = t
-            # conf_trgt = 0.35
-            # conf_objt = 0.75
-            # detection_session.tracker.conf_trgt = conf_trgt
-            # detection_session.tracker.conf_objt = conf_objt

         # this a single pass or the second pass. detect once and return results.
         if multipass_crop:
@@ -543,61 +505,11 @@ def is_same_detection_middle(d1: ObjectDetectionResult, d2: ObjectDetectionResul
         ret = ret1
         ret['detections'] = dedupe_detections(ret1['detections'] + ret2['detections'], is_same_detection=is_same_detection_middle)

-        if detection_session:
-            self.track(detection_session, ret)
-
         if not len(ret['detections']):
             return ret, RawImage(image)

         return ret, RawImage(image)

-    def track(self, detection_session: PredictSession, ret: ObjectsDetected):
-        detections = ret['detections']
-        sort_input = []
-        for d in ret['detections']:
-            r: ObjectDetectionResult = d
-            l, t, w, h = r['boundingBox']
-            sort_input.append([l, t, l + w, t + h, r['score']])
-        trackers, unmatched_trckr, unmatched_gts = detection_session.tracker.update(np.array(sort_input), [])
-        for td in trackers:
-            x0, y0, x1, y1, trackID = td[0].item(), td[1].item(
-            ), td[2].item(), td[3].item(), td[4].item()
-            slop = 0
-            obj: ObjectDetectionResult = None
-            ta = (x1 - x0) * (y1 - y0)
-            box = Rectangle(x0, y0, x1, y1)
-            for d in detections:
-                if d.get('id'):
-                    continue
-                ob: ObjectDetectionResult = d
-                dx0, dy0, dw, dh = ob['boundingBox']
-                dx1 = dx0 + dw
-                dy1 = dy0 + dh
-                da = dw * dh
-                area = intersect_area(Rectangle(dx0, dy0, dx1, dy1), box)
-                if not area:
-                    continue
-                # intersect area always gonna be smaller than
-                # the detection or tracker area.
-                # greater numbers, ie approaching 2, is better.
-                dslop = area / ta + area / da
-                if (dslop > slop):
-                    slop = dslop
-                    obj = ob
-            if obj:
-                obj['id'] = str(trackID)
-            # this may happen if tracker predicts something is still in the scene
-            # but was not detected
-            # else:
-            # print('unresolved tracker')
-            # for d in detections:
-            # if not d.get('id'):
-            # # this happens if the tracker is not confident in a new detection yet due
-            # # to low score or has not been found in enough frames
-            # if d['className'] == 'person':
-            # print('untracked %s: %s' % (d['className'], d['score']))
-
     async def run_detection_crop(self, detection_session: DetectionSession, sample: RawImage, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
         (ret, _) = await self.run_detection_image(detection_session, sample.image, settings, src_size, convert_to_src_size, bounding_box)
         return ret
diff --git a/plugins/tensorflow-lite/src/requirements.txt b/plugins/tensorflow-lite/src/requirements.txt
index 57b13ee5c1..0751583052 100644
--- a/plugins/tensorflow-lite/src/requirements.txt
+++ b/plugins/tensorflow-lite/src/requirements.txt
@@ -7,10 +7,4 @@ Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
 pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
 pycoral~=2.0
 PyGObject>=3.30.4; sys_platform != 'win32'
-# libav doesnt work on arm7
-av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
 tflite-runtime==2.5.0.post1
-
-# sort_oh
-scipy
-filterpy
diff --git a/plugins/tensorflow-lite/src/tflite/__init__.py b/plugins/tensorflow-lite/src/tflite/__init__.py
index 796c10352c..bd2f50e0d9 100644
--- a/plugins/tensorflow-lite/src/tflite/__init__.py
+++ b/plugins/tensorflow-lite/src/tflite/__init__.py
@@ -21,6 +21,7 @@
 import concurrent.futures
 import queue
 import asyncio
+from time import time

 def parse_label_contents(contents: str):
     lines = contents.splitlines()
@@ -116,6 +117,7 @@ async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
         def predict():
             interpreter = self.interpreters.get()
             try:
+                print('predict s %s' % time())
                 common.set_input(
                     interpreter, input)
                 scale = (1, 1)
@@ -131,6 +133,7 @@ def predict():
                 raise e
             finally:
                 self.interpreters.put(interpreter)
+                print('predict e %s' % time())

         objs = await asyncio.get_event_loop().run_in_executor(self.executor, predict)
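
For context on the tracker code removed above: track() matched each box returned by the Sort_OH tracker against the reported detections using an overlap ratio, intersection area divided by the tracker box area plus intersection area divided by the detection box area, which approaches 2 for a near-perfect overlap and is 0 for disjoint boxes; the best-scoring detection then received the tracker id. The following is a minimal standalone sketch of that matching metric only; Box, intersect_area, and best_match are illustrative names, not the plugin's actual helpers or API.

    # Illustrative sketch of the overlap test used by the removed track() method.
    # All names here are hypothetical; only the scoring formula comes from the patch.
    from collections import namedtuple
    from typing import List, Optional

    Box = namedtuple('Box', 'x0 y0 x1 y1')

    def intersect_area(a: Box, b: Box) -> float:
        # Overlap area of two axis-aligned boxes; 0 when they do not intersect.
        w = min(a.x1, b.x1) - max(a.x0, b.x0)
        h = min(a.y1, b.y1) - max(a.y0, b.y0)
        return w * h if w > 0 and h > 0 else 0

    def best_match(track_box: Box, detections: List[Box]) -> Optional[int]:
        # Score each detection by area/ta + area/da; the intersection is never
        # larger than either box, so the score approaches 2 for a tight match.
        best_index = None
        best_slop = 0
        ta = (track_box.x1 - track_box.x0) * (track_box.y1 - track_box.y0)
        for i, d in enumerate(detections):
            da = (d.x1 - d.x0) * (d.y1 - d.y0)
            area = intersect_area(track_box, d)
            if not area:
                continue
            slop = area / ta + area / da
            if slop > best_slop:
                best_slop = slop
                best_index = i
        return best_index

    if __name__ == '__main__':
        track = Box(10, 10, 50, 50)
        detections = [Box(100, 100, 140, 140), Box(12, 8, 52, 48)]
        print(best_match(track, detections))  # prints 1: the overlapping detection

In the removed plugin code, the matched detection was tagged with obj['id'] = str(trackID); detections that matched no tracker box were simply left without an id.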