Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add modelAPI to pedestrian_tracker_demo #2654

Merged
merged 28 commits into from
Sep 24, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
e937b44
add modelAPI to pedestrian_tracker_demo last changes
VoronovaIntern Aug 2, 2021
f44b680
mist
VoronovaIntern Aug 4, 2021
b47398d
mist 2
VoronovaIntern Aug 4, 2021
a2c6518
add model API to pedestrian_tracker_demo with changes
VoronovaIntern Aug 4, 2021
fd3ccf1
solve mistakes
VoronovaIntern Aug 6, 2021
fb9a776
solve mistakes 3
VoronovaIntern Aug 9, 2021
1ea4925
solve mistakes 4
VoronovaIntern Aug 9, 2021
cb52ca7
add key person_label and tests
VoronovaIntern Aug 16, 2021
71df42c
missing whitespace
VoronovaIntern Aug 17, 2021
fb1d485
mist
VoronovaIntern Aug 4, 2021
f6c43a0
mist 2
VoronovaIntern Aug 4, 2021
cee629b
add model API to pedestrian_tracker_demo with changes
VoronovaIntern Aug 4, 2021
485cfc0
solve mistakes
VoronovaIntern Aug 6, 2021
7efa244
solve mistakes 3
VoronovaIntern Aug 9, 2021
0bd4e29
solve mistakes 4
VoronovaIntern Aug 9, 2021
36f6cd1
add key person_label and tests
VoronovaIntern Aug 16, 2021
c9ac729
missing whitespace
VoronovaIntern Aug 17, 2021
9b8105e
cases.py
VoronovaIntern Aug 17, 2021
4c42f82
update readme.md, main.cpp, pedestrian_tracker_demo.hpp
VoronovaIntern Aug 24, 2021
154c802
cases.py
VoronovaIntern Aug 25, 2021
6a16ecb
Update demos/pedestrian_tracker_demo/cpp/README.md
VoronovaIntern Aug 25, 2021
8183fa0
back to detection wrapper
VoronovaIntern Sep 22, 2021
3f2da9c
update cases.py
VoronovaIntern Sep 23, 2021
5ce3806
updates cases.py
VoronovaIntern Sep 23, 2021
0ee9726
update README.md
VoronovaIntern Sep 24, 2021
4c6c82a
fix warning and stylish
vladimir-dudnik Sep 24, 2021
64e3da5
one more warning fix
vladimir-dudnik Sep 24, 2021
2be5baf
fix yet another warning
vladimir-dudnik Sep 24, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 7 additions & 7 deletions demos/common/cpp/models/src/detection_model_yolo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -155,32 +155,32 @@ void ModelYolo::prepareInputsOutputs(InferenceEngine::CNNNetwork& cnnNetwork) {
}

std::sort(outputsNames.begin(), outputsNames.end(),
[&outputInfo](const std::string& x, const std::string& y) {return outputInfo[x]->getDims()[2] > outputInfo[y]->getDims()[2];});
[&outputInfo](const std::string& x, const std::string& y) {return outputInfo[x]->getDims()[2] > outputInfo[y]->getDims()[2];});

for (const auto& name : outputsNames) {
auto& output = outputInfo[name];
auto shape = output->getDims();
auto classes = shape[1] / num - 4 - isObjConf;
int classes = (int)shape[1] / num - 4 - (isObjConf ? 1 : 0);
if (shape[1] % num != 0) {
throw std::runtime_error(std::string("The output blob ") + name + " has wrong 2nd dimension");
}
regions.emplace(name, Region(classes, 4,
presetAnchors.size() ? presetAnchors : defaultAnchors[yoloVersion],
std::vector<int64_t>(chosenMasks.begin() + i*num, chosenMasks.begin() + (i+1)*num),
shape[3], shape[2]));
(int)shape[3], (int)shape[2]));
i++;
}
}
else {
// Currently externally set anchors and masks are supported only for YoloV4
if(presetAnchors.size() || presetMasks.size()){
if(presetAnchors.size() || presetMasks.size()) {
slog::warn << "Preset anchors and mask can be set for YoloV4 model only. "
"This model is not YoloV4, so these options will be ignored." << slog::endl;
}
}
}

std::unique_ptr<ResultBase> ModelYolo::postprocess(InferenceResult & infResult) {
std::unique_ptr<ResultBase> ModelYolo::postprocess(InferenceResult& infResult) {
DetectionResult* result = new DetectionResult(infResult.frameId, infResult.metaData);
std::vector<DetectedObject> objects;

Expand Down Expand Up @@ -333,8 +333,8 @@ ModelYolo::Region::Region(const std::shared_ptr<ngraph::op::RegionYolo>& regionY
num = mask.size();

auto shape = regionYolo->get_input_shape(0);
outputWidth = shape[3];
outputHeight = shape[2];
outputWidth = (int)shape[3];
outputHeight = (int)shape[2];

if (num) {

Expand Down
11 changes: 7 additions & 4 deletions demos/pedestrian_tracker_demo/cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,12 +1,15 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

file(GLOB_RECURSE SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
file(GLOB_RECURSE SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp ../${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp ../${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)




add_demo(NAME pedestrian_tracker_demo
SOURCES ${SOURCES}
HEADERS ${HEADERS}
INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include"
DEPENDENCIES monitors)
DEPENDENCIES monitors models pipelines)
69 changes: 63 additions & 6 deletions demos/pedestrian_tracker_demo/cpp/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,58 @@ python3 <omz_dir>/tools/downloader/converter.py --list models.lst

### Supported Models

* person-detection-retail-0002
* person-detection-retail-0013
* person-reidentification-retail-0277
* person-reidentification-retail-0286
* person-reidentification-retail-0287
* person-reidentification-retail-0288
* architecture_type = centernet
- ctdet_coco_dlav0_384
- ctdet_coco_dlav0_512
* architecture_type = ssd
- efficientdet-d0-tf
- efficientdet-d1-tf
- faster-rcnn-resnet101-coco-sparse-60-0001
- pedestrian-and-vehicle-detector-adas-0001
- pedestrian-detection-adas-0002
- pelee-coco
- person-detection-0106
- person-detection-0200
- person-detection-0201
- person-detection-0202
- person-detection-0203
- person-detection-retail-0002
- person-detection-retail-0013
- person-vehicle-bike-detection-2000
- person-vehicle-bike-detection-2001
- person-vehicle-bike-detection-2002
- person-vehicle-bike-detection-2003
- person-vehicle-bike-detection-2004
- rfcn-resnet101-coco-tf
- retinanet-tf
- ssd300
- ssd512
- ssd-resnet34-1200-onnx
- ssd_mobilenet_v1_coco
- ssd_mobilenet_v1_fpn_coco
- ssd_mobilenet_v2_coco
- ssd_resnet50_v1_fpn_coco
- ssdlite_mobilenet_v2
- vehicle-detection-adas-0002
* architecture_type = yolo
- person-vehicle-bike-detection-crossroad-yolov3-1020
- yolo-v3-tf
- yolo-v3-tiny-tf
- yolo-v1-tiny-tf
- yolo-v2-ava-0001
- yolo-v2-ava-sparse-35-0001
- yolo-v2-ava-sparse-70-0001
- yolo-v2-tf
- yolo-v2-tiny-ava-0001
- yolo-v2-tiny-ava-sparse-30-0001
- yolo-v2-tiny-ava-sparse-60-0001
- yolo-v2-tiny-tf
- yolo-v2-tiny-vehicle-detection-0001
* reidentification models
- person-reidentification-retail-0277
- person-reidentification-retail-0286
- person-reidentification-retail-0287
- person-reidentification-retail-0288

> **NOTE**: Refer to the tables [Intel's Pre-Trained Models Device Support](../../../models/intel/device_support.md) and [Public Pre-Trained Models Device Support](../../../models/public/device_support.md) for the details on models inference support at different devices.

Expand All @@ -69,6 +115,7 @@ Options:
-o "<path>" Optional. Name of the output file(s) to save.
-limit "<num>" Optional. Number of frames to store in output. If 0 is set, all frames are stored.
-m_det "<path>" Required. Path to the Pedestrian Detection Retail model (.xml) file.
    -at "<type>"              Required. Architecture type for detector model: centernet, ssd or yolo.
-m_reid "<path>" Required. Path to the Pedestrian Reidentification Retail model (.xml) file.
-l "<absolute_path>" Optional. For CPU custom layers, if any. Absolute path to a shared library with the kernels implementation.
Or
Expand All @@ -80,6 +127,15 @@ Options:
-delay Optional. Delay between frames used for visualization. If negative, the visualization is turned off (like with the option 'no_show'). If zero, the visualization is made frame-by-frame.
-out "<path>" Optional. The file name to write output log file with results of pedestrian tracking. The format of the log file is compatible with MOTChallenge format.
-u Optional. List of monitors to show initially.
-t Optional. Probability threshold for detections.
VoronovaIntern marked this conversation as resolved.
Show resolved Hide resolved
-auto_resize Optional. Enables resizable input with support of ROI crop & auto resize.
-iou_t Optional. Filtering intersection over union threshold for overlapping boxes.
-yolo_af Optional. Use advanced postprocessing/filtering algorithm for YOLO.
    -labels "<path>"          Optional. Path to a file with labels mapping.
    -nireq "<integer>"        Optional. Number of infer requests for detector model. If this option is omitted, number of infer requests is determined automatically.
    -nstreams                 Optional. Number of streams to use for inference on the CPU or/and GPU in throughput mode for detector model (for HETERO and MULTI device cases use format <device1>:<nstreams1>,<device2>:<nstreams2> or just <nstreams>)
    -nthreads "<integer>"     Optional. Number of threads for detector model.
-person_label Optional. Label of class person for detector. Default -1 for tracking all objects
```

For example, to run the application with the OpenVINO&trade; toolkit pre-trained models with inferencing pedestrian detector on a GPU and pedestrian reidentification on a CPU, run the following command:
Expand All @@ -89,6 +145,7 @@ For example, to run the application with the OpenVINO&trade; toolkit pre-trained
-m_det <path_to_model>/person-detection-retail-0013.xml \
-m_reid <path_to_model>/person-reidentification-retail-0277.xml \
-d_det GPU
-at ssd
```

>**NOTE**: If you provide a single image as an input, the demo processes and renders it quickly, then exits. To continuously visualize inference results on the screen, apply the `loop` option, which enforces processing a single image in a loop.
Expand Down
10 changes: 5 additions & 5 deletions demos/pedestrian_tracker_demo/cpp/include/cnn.hpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand All @@ -17,8 +17,8 @@
/**
* @brief Base class of config for network
*/
struct CnnConfig {
explicit CnnConfig(const std::string& path_to_model)
struct CnnConfigTracker {
explicit CnnConfigTracker(const std::string& path_to_model)
: path_to_model(path_to_model) {}

/** @brief Path to model description */
Expand All @@ -32,7 +32,7 @@ struct CnnConfig {
*/
class CnnBase {
public:
using Config = CnnConfig;
using Config = CnnConfigTracker;

/**
* @brief Constructor
Expand Down Expand Up @@ -92,7 +92,7 @@ class CnnBase {

class VectorCNN : public CnnBase {
public:
VectorCNN(const CnnConfig& config,
VectorCNN(const CnnConfigTracker& config,
const InferenceEngine::Core & ie,
const std::string & deviceName);

Expand Down
6 changes: 3 additions & 3 deletions demos/pedestrian_tracker_demo/cpp/include/core.hpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand All @@ -17,7 +17,7 @@
struct TrackedObject {
cv::Rect rect; ///< Detected object ROI (zero area if N/A).
double confidence; ///< Detection confidence level (-1 if N/A).
int frame_idx; ///< Frame index where object was detected (-1 if N/A).
int64_t frame_idx; ///< Frame index where object was detected (-1 if N/A).
int object_id; ///< Unique object identifier (-1 if N/A).
uint64_t timestamp; ///< Timestamp in milliseconds.

Expand All @@ -37,7 +37,7 @@ struct TrackedObject {
/// \param frame_idx Index of frame.
/// \param object_id Object ID.
///
TrackedObject(const cv::Rect &rect, float confidence, int frame_idx,
TrackedObject(const cv::Rect &rect, float confidence, int64_t frame_idx,
int object_id)
: rect(rect),
confidence(confidence),
Expand Down
6 changes: 3 additions & 3 deletions demos/pedestrian_tracker_demo/cpp/include/descriptor.hpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand Down Expand Up @@ -107,9 +107,9 @@ class DescriptorIE : public IImageDescriptor {
VectorCNN handler;

public:
DescriptorIE(const CnnConfig& config,
DescriptorIE(const CnnConfigTracker& config,
const InferenceEngine::Core& ie,
const std::string & deviceName):
const std::string& deviceName):
handler(config, ie, deviceName) {}

///
Expand Down
63 changes: 0 additions & 63 deletions demos/pedestrian_tracker_demo/cpp/include/detector.hpp

This file was deleted.

Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand Down Expand Up @@ -40,6 +40,18 @@ static const char delay_message[] = "Optional. Delay between frames used for vis
static const char output_log_message[] = "Optional. The file name to write output log file with results of pedestrian tracking. "
"The format of the log file is compatible with MOTChallenge format.";
static const char utilization_monitors_message[] = "Optional. List of monitors to show initially.";
static const char at_message[] = "Required. Architecture type for detector model: centernet, ssd or yolo.";
static const char thresh_output_message[] = "Optional. Probability threshold for detections.";
static const char input_resizable_message[] = "Optional. Enables resizable input with support of ROI crop & auto resize.";
static const char iou_thresh_output_message[] = "Optional. Filtering intersection over union threshold for overlapping boxes.";
static const char yolo_af_message[] = "Optional. Use advanced postprocessing/filtering algorithm for YOLO.";
static const char labels_message[] = "Optional. Path to a file with labels mapping.";
static const char nireq_message[] = "Optional. Number of infer requests for detector model. If this option is omitted, number of infer requests is determined automatically.";
static const char num_threads_message[] = "Optional. Number of threads for detector model.";
static const char num_streams_message[] = "Optional. Number of streams to use for inference on the CPU or/and GPU in "
"throughput mode for detector model (for HETERO and MULTI device cases use format "
"<device1>:<nstreams1>,<device2>:<nstreams2> or just <nstreams>)";
static const char person_label_message[] = "Optional. Label of class person for detector. Default -1 for tracking all objects";


DEFINE_bool(h, false, help_message);
Expand All @@ -56,6 +68,16 @@ DEFINE_bool(no_show, false, no_show_message);
DEFINE_int32(delay, 3, delay_message);
DEFINE_string(out, "", output_log_message);
DEFINE_string(u, "", utilization_monitors_message);
DEFINE_string(at, "", at_message);
DEFINE_double(t, 0.5, thresh_output_message);
DEFINE_bool(auto_resize, false, input_resizable_message);
DEFINE_double(iou_t, 0.5, iou_thresh_output_message);
DEFINE_bool(yolo_af, true, yolo_af_message);
DEFINE_string(labels, "", labels_message);
DEFINE_uint32(nireq, 0, nireq_message);
DEFINE_uint32(nthreads, 0, num_threads_message);
DEFINE_string(nstreams, "", num_streams_message);
DEFINE_int32(person_label, -1, person_label_message);


/**
Expand Down Expand Up @@ -85,4 +107,14 @@ static void showUsage() {
std::cout << " -delay " << delay_message << std::endl;
std::cout << " -out \"<path>\" " << output_log_message << std::endl;
std::cout << " -u " << utilization_monitors_message << std::endl;
std::cout << " -at \"<type>\" " << at_message << std::endl;
std::cout << " -t " << thresh_output_message << std::endl;
std::cout << " -auto_resize " << input_resizable_message << std::endl;
std::cout << " -iou_t " << iou_thresh_output_message << std::endl;
std::cout << " -yolo_af " << yolo_af_message << std::endl;
std::cout << " -labels \"<path>\" " << labels_message << std::endl;
std::cout << " -nireq \"<integer>\" " << nireq_message << std::endl;
std::cout << " -nstreams " << num_streams_message << std::endl;
std::cout << " -nthreads \"<integer>\" " << num_threads_message << std::endl;
std::cout << " -person_label " << person_label_message << std::endl;
}
6 changes: 3 additions & 3 deletions demos/pedestrian_tracker_demo/cpp/include/tracker.hpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand Down Expand Up @@ -303,8 +303,8 @@ class PedestrianTracker {

private:
struct Match {
int frame_idx1;
int frame_idx2;
int64_t frame_idx1;
int64_t frame_idx2;
cv::Rect rect1;
cv::Rect rect2;
cv::Rect pr_rect1;
Expand Down
4 changes: 2 additions & 2 deletions demos/pedestrian_tracker_demo/cpp/include/utils.hpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

Expand All @@ -24,7 +24,7 @@
///
struct DetectionLogEntry {
TrackedObjects objects; ///< Detected objects.
int frame_idx; ///< Processed frame index (-1 if N/A).
int64_t frame_idx; ///< Processed frame index (-1 if N/A).
double time_ms; ///< Frame processing time in ms (-1 if N/A).

///
Expand Down
Loading