Commit

update readme.md, main.cpp, pedestrian_tracker_demo.hpp

VoronovaIntern committed Aug 24, 2021
1 parent 6b420ac commit 17400d3
Showing 3 changed files with 26 additions and 10 deletions.
10 changes: 10 additions & 0 deletions demos/pedestrian_tracker_demo/cpp/README.md
@@ -116,6 +116,7 @@ Options:
-limit "<num>" Optional. Number of frames to store in output. If 0 is set, all frames are stored.
-m_det "<path>" Required. Path to the Pedestrian Detection Retail model (.xml) file.
-m_reid "<path>" Required. Path to the Pedestrian Reidentification Retail model (.xml) file.
+-at "<type>" Required. Architecture type for detector model: centernet, ssd or yolo.
-l "<absolute_path>" Optional. For CPU custom layers, if any. Absolute path to a shared library with the kernels implementation.
Or
-c "<absolute_path>" Optional. For GPU custom kernels, if any. Absolute path to the .xml file with the kernels description.
@@ -126,6 +127,15 @@ Options:
-delay Optional. Delay between frames used for visualization. If negative, the visualization is turned off (like with the option 'no_show'). If zero, the visualization is made frame-by-frame.
-out "<path>" Optional. The file name to write output log file with results of pedestrian tracking. The format of the log file is compatible with MOTChallenge format.
-u Optional. List of monitors to show initially.
+-t Optional. Probability threshold for detections.
+-auto_resize Optional. Enables resizable input with support of ROI crop & auto resize.
+-iou_t Optional. Filtering intersection over union threshold for overlapping boxes.
+-yolo_af Optional. Use advanced postprocessing/filtering algorithm for YOLO.
+-labels "<path>" Optional. Path to a file with labels mapping.
+-nireq "<integer>" Optional. Number of infer requests for detector model. If this option is omitted, number of infer requests is determined automatically.
+-nstreams Optional. Number of streams to use for inference on the CPU or/and GPU in throughput mode for detector model (for HETERO and MULTI device cases use format <device1>:<nstreams1>,<device2>:<nstreams2> or just <nstreams>)
+-nthreads "<integer>" Optional. Number of threads for detector model.
+-person_label Optional. Label of class person for detector. Default -1 for tracking all objects.
```

For example, to run the application with the OpenVINO&trade; toolkit pre-trained models, running the pedestrian detector on a GPU and pedestrian re-identification on a CPU, use a command of the following form:
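A representative invocation; the model file names and the -d_det/-d_reid device flags are assumptions drawn from this demo family, while -at is the architecture-type option this commit makes required:

```
./pedestrian_tracker_demo -i <path_to_video> \
    -m_det <path_to_model>/person-detection-retail-0013.xml \
    -m_reid <path_to_model>/person-reidentification-retail-0277.xml \
    -at ssd \
    -d_det GPU \
    -d_reid CPU
```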

8 changes: 4 additions & 4 deletions demos/pedestrian_tracker_demo/cpp/pedestrian_tracker_demo.hpp
@@ -40,16 +40,16 @@ static const char delay_message[] = "Optional. Delay between frames used for vis
static const char output_log_message[] = "Optional. The file name to write output log file with results of pedestrian tracking. "
"The format of the log file is compatible with MOTChallenge format.";
static const char utilization_monitors_message[] = "Optional. List of monitors to show initially.";
-static const char at_message[] = "Required. Architecture type: centernet, ssd or yolo";
+static const char at_message[] = "Required. Architecture type for detector model: centernet, ssd or yolo.";
static const char thresh_output_message[] = "Optional. Probability threshold for detections.";
static const char input_resizable_message[] = "Optional. Enables resizable input with support of ROI crop & auto resize.";
static const char iou_thresh_output_message[] = "Optional. Filtering intersection over union threshold for overlapping boxes.";
static const char yolo_af_message[] = "Optional. Use advanced postprocessing/filtering algorithm for YOLO.";
static const char labels_message[] = "Optional. Path to a file with labels mapping.";
-static const char nireq_message[] = "Optional. Number of infer requests. If this option is omitted, number of infer requests is determined automatically.";
-static const char num_threads_message[] = "Optional. Number of threads.";
+static const char nireq_message[] = "Optional. Number of infer requests for detector model. If this option is omitted, number of infer requests is determined automatically.";
+static const char num_threads_message[] = "Optional. Number of threads for detector model.";
static const char num_streams_message[] = "Optional. Number of streams to use for inference on the CPU or/and GPU in "
-"throughput mode (for HETERO and MULTI device cases use format "
+"throughput mode for detector model (for HETERO and MULTI device cases use format "
"<device1>:<nstreams1>,<device2>:<nstreams2> or just <nstreams>)";
static const char person_label_message[] = "Optional. Label of class person for detector. Default -1 for tracking all objects";

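For context, these message strings are bound to command-line flags with gflags elsewhere in this header. A minimal sketch of that wiring under the usual Open Model Zoo convention; the default values shown are assumptions, not taken from the commit:

```cpp
#include <gflags/gflags.h>

// Hypothetical excerpt: bind the help strings above to flags.
DEFINE_string(at, "", at_message);                      // detector architecture type (required)
DEFINE_double(t, 0.5, thresh_output_message);           // detection probability threshold (default assumed)
DEFINE_bool(auto_resize, false, input_resizable_message);
DEFINE_double(iou_t, 0.5, iou_thresh_output_message);   // IoU filtering threshold (default assumed)
DEFINE_bool(yolo_af, true, yolo_af_message);
DEFINE_string(labels, "", labels_message);
DEFINE_uint32(nireq, 0, nireq_message);                 // 0 = choose automatically
DEFINE_string(nstreams, "", num_streams_message);
DEFINE_uint32(nthreads, 0, num_threads_message);
DEFINE_int32(person_label, -1, person_label_message);   // -1 = track all classes
```
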
18 changes: 12 additions & 6 deletions demos/pedestrian_tracker_demo/cpp/main.cpp
Expand Up @@ -27,8 +27,6 @@
#include <models/detection_model_yolo.h>
#include <pipelines/metadata.h>

-using namespace InferenceEngine;

using ImageWithFrameIndex = std::pair<cv::Mat, int>;

std::unique_ptr<PedestrianTracker>
@@ -109,6 +107,7 @@ bool ParseAndCheckCommandLine(int argc, char *argv[]) {

int main(int argc, char **argv) {
try {
+PerformanceMetrics metrics;

if (!ParseAndCheckCommandLine(argc, argv)) {
return 0;
@@ -158,7 +157,7 @@ int main(int argc, char **argv) {

std::vector<std::string> devices{detector_mode, reid_mode};

-slog::info << *GetInferenceEngineVersion() << slog::endl;
+slog::info << *InferenceEngine::GetInferenceEngineVersion() << slog::endl;
InferenceEngine::Core ie;

auto execNet = detectionModel->loadExecutableNetwork(
@@ -176,6 +175,7 @@ int main(int argc, char **argv) {
video_fps = 60.0;
}

+auto startTime = std::chrono::steady_clock::now();
cv::Mat frame = cap->read();
if (!frame.data) throw std::runtime_error("Can't read an image from the input");
cv::Size firstFrameSize = frame.size();
Expand Down Expand Up @@ -205,11 +205,13 @@ int main(int argc, char **argv) {

auto blobPtr = req->GetBlob(outName);

-if (Precision::I32 == blobPtr->getTensorDesc().getPrecision()) {
-    res.outputsData.emplace(outName, std::make_shared<TBlob<int>>(*as<TBlob<int>>(blobPtr)));
+if (InferenceEngine::Precision::I32 == blobPtr->getTensorDesc().getPrecision()) {
+    res.outputsData.emplace(outName,
+        std::make_shared<InferenceEngine::TBlob<int>>(*InferenceEngine::as<InferenceEngine::TBlob<int>>(blobPtr)));
}
else {
-    res.outputsData.emplace(outName, std::make_shared<TBlob<float>>(*as<TBlob<float>>(blobPtr)));
+    res.outputsData.emplace(outName,
+        std::make_shared<InferenceEngine::TBlob<float>>(*InferenceEngine::as<InferenceEngine::TBlob<float>>(blobPtr)));
}
}

@@ -250,6 +252,7 @@ int main(int argc, char **argv) {
tracker->Process(frame, detections, cur_timestamp);

presenter.drawGraphs(frame);
+metrics.update(startTime, frame, { 10, 22 }, cv::FONT_HERSHEY_COMPLEX, 0.65);
// Drawing colored "worms" (tracks).
frame = tracker->DrawActiveTracks(frame);

Expand Down Expand Up @@ -284,6 +287,7 @@ int main(int argc, char **argv) {
DetectionLog log = tracker->GetDetectionLog(true);
SaveDetectionLogToTrajFile(detlog_out, log);
}
+startTime = std::chrono::steady_clock::now();
frame = cap->read();
if (!frame.data) break;
if (frame.size() != firstFrameSize)
@@ -299,6 +303,8 @@ int main(int argc, char **argv) {
PrintDetectionLog(log);
}

+slog::info << "Metrics report:" << slog::endl;
+metrics.logTotal();
slog::info << presenter.reportMeans() << slog::endl;
}
catch (const std::exception& error) {
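Taken together, the main.cpp additions implement a simple per-frame latency pattern: stamp the clock before each frame is read, fold the measurement into the running statistics once the frame has been processed and drawn, then log a summary when the stream ends. A condensed sketch of that loop, assuming the PerformanceMetrics class from the demos' common utilities; the processing body is elided:

```cpp
#include <chrono>

PerformanceMetrics metrics;

auto startTime = std::chrono::steady_clock::now();  // stamp before the first read
cv::Mat frame = cap->read();
while (frame.data) {
    // ... detection, re-identification, tracking, drawing ...

    // Accumulate FPS/latency for this frame and draw the overlay at (10, 22).
    metrics.update(startTime, frame, { 10, 22 }, cv::FONT_HERSHEY_COMPLEX, 0.65);

    startTime = std::chrono::steady_clock::now();   // re-stamp for the next frame
    frame = cap->read();
}

slog::info << "Metrics report:" << slog::endl;
metrics.logTotal();                                 // averaged FPS and latency
```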
