Enable no-copy API in Model API OVMS Adapter to optimize Embedding endpoint #92

Merged: 12 commits, Nov 4, 2024
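The heart of this change is a second infer overload on OVMSInferenceAdapter that takes the output map by reference, plus new getInputDatatype, getOutputDatatype, and getOutputShape accessors: a caller can hand in pre-allocated ov::Tensor outputs and OVMS writes the results into those buffers instead of allocating and copying fresh tensors for the Embedding endpoint. The sketch below shows the intended call pattern only; the function name, tensor names, static shape, and include path are illustrative assumptions, not code from this PR.

```cpp
// Hypothetical call-site sketch for the new no-copy overload; names, the
// shape, and the include path are assumptions for illustration only.
#include <string>

#include <openvino/openvino.hpp>

#include "adapters/inference_adapter.h"  // model_api InferenceAdapter interface (path may differ per setup)

void runEmbeddingNoCopy(InferenceAdapter& adapter, const ov::Tensor& tokens) {
    InferenceInput input;
    input.emplace("input_ids", tokens);              // assumed input name

    // Pre-allocate the result buffer once; with the new overload OVMS writes
    // the embedding directly into this tensor instead of returning a copy.
    ov::Tensor embedding(adapter.getOutputDatatype("last_hidden_state"),
                         ov::Shape{1, 512, 768});    // assumed static shape
    InferenceOutput output;
    output.emplace("last_hidden_state", embedding);  // assumed output name

    adapter.infer(input, output);                    // no-copy overload added by this PR
    // 'embedding' shares its buffer with output["last_hidden_state"], so the
    // result is available here without an extra allocation or copy.
}
```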
.bazelrc (10 changes: 0 additions & 10 deletions)
@@ -106,21 +106,11 @@ try-import %workspace%/.user.bazelrc

# Disable native ovms mediapipe support when building in mediapipe repository
build --define=MEDIAPIPE_DISABLE=1
coverage --define=MEDIAPIPE_DISABLE=1
test --define=MEDIAPIPE_DISABLE=1

build --cxxopt=-DMEDIAPIPE_DISABLE=1
coverage --cxxopt=-DMEDIAPIPE_DISABLE=1
test --cxxopt=-DMEDIAPIPE_DISABLE=1

build --define=PYTHON_DISABLE=1
coverage --define=PYTHON_DISABLE=1
test --define=PYTHON_DISABLE=1

build --cxxopt=-DPYTHON_DISABLE=1
coverage --cxxopt=-DPYTHON_DISABLE=1
test --cxxopt=-DPYTHON_DISABLE=1

build --cxxopt=-DOVMS_DUMP_TO_FILE=0
coverage --cxxopt=-DOVMS_DUMP_TO_FILE=0
test --cxxopt=-DOVMS_DUMP_TO_FILE=0
Dockerfile.openvino (1 change: 1 addition & 0 deletions)
@@ -193,6 +193,7 @@ RUN wget -O video.mp4 "https://www.pexels.com/download/video/3044127/?fps=24.0&h
COPY mediapipe /mediapipe/mediapipe/
COPY third_party /mediapipe/third_party/
COPY .bazelrc /mediapipe/
COPY .user.bazelrc /mediapipe/
COPY .bazelversion /mediapipe/
COPY *.sh /mediapipe/
COPY *.py /mediapipe/
WORKSPACE (13 changes: 3 additions & 10 deletions)
@@ -636,7 +636,7 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
git_repository(
name = "ovms",
remote = "https://github.com/openvinotoolkit/model_server",
commit = "ba43198285a1bbad7ec74b672161eb84d27e6adf" # Windows groovy (#2762)
commit = "aa07d47407557781036e2e4f00501bfa5bf3c79b" # Windows groovy (#2762)
)

# DEV ovms - adjust local repository path for build
@@ -837,15 +837,8 @@ git_repository(
patches = ["@ovms//external:mwaitpkg.patch",]
)

load("@rules_foreign_cc//foreign_cc:cmake.bzl", "cmake")
load("@//third_party/model_api:model_api.bzl", "model_api_repository")
model_api_repository(name="_model-api")
new_git_repository(
name = "model_api",
remote = "https:///github.com/openvinotoolkit/model_api/",
build_file = "@_model-api//:BUILD",
commit = "eb9fcfb1e1eebc047ff144707f76203b132e1aa6" # master Jun 24 15:02:17 2024 [cpp] Fix num classes check
)
load("@//third_party/model_api:model_api.bzl", "workspace_model_api")
workspace_model_api()

git_repository(
name = "nlohmann_json",
ci/testOnCommit.groovy (2 changes: 2 additions & 0 deletions)
@@ -14,6 +14,8 @@ pipeline {
}
stage("build image") {
steps {
// TODO this should be enabled after Jenkins update
// sh "echo build --remote_cache=${env.OVMS_BAZEL_REMOTE_CACHE_URL} > .user.bazelrc"
sh script: "make docker_build OVMS_MEDIA_IMAGE_TAG=${shortCommit}"
}
}
mediapipe/calculators/ovms/modelapiovmsadapter.cc (133 changes: 121 additions & 12 deletions)
@@ -70,31 +70,109 @@ OVMSInferenceAdapter::~OVMSInferenceAdapter() {
LOG(INFO) << "OVMSAdapter destr";
}

InferenceOutput OVMSInferenceAdapter::infer(const InferenceInput& input) {
inline std::vector<int64_t> getShapeAcceptableByCAPI(const ov::Shape& shape) {
if (std::any_of(shape.begin(), shape.end(), [](size_t dim) {
return dim > std::numeric_limits<int64_t>::max();})) {
throw std::runtime_error("Cannot use C-API with dimension size greater than int64_t max value");
}
return std::vector<int64_t>{shape.begin(), shape.end()};
}

void OVMSInferenceAdapter::infer(const InferenceInput& input, InferenceOutput& output) {
/////////////////////
// PREPARE REQUEST
/////////////////////
OVMS_InferenceRequest* request{nullptr};
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceRequestNew(&request, cserver, servableName.c_str(), servableVersion));
CREATE_GUARD(requestGuard, OVMS_InferenceRequest, request);

InferenceOutput output;

OVMS_Status* status{nullptr};
std::vector<std::string> outputsSet;
// PREPARE EACH INPUT
// extract single tensor
for (const auto& [name, input_tensor] : input) {
const char* realInputName = name.c_str();
const auto& ovinputShape = input_tensor.get_shape();
if (std::any_of(ovinputShape.begin(), ovinputShape.end(), [](size_t dim) {
return dim > std::numeric_limits<int64_t>::max();})) {
throw std::runtime_error("Cannot use C-API with dimension size greater than int64_t max value");
const char* realName = name.c_str();
const auto& ovShape = input_tensor.get_shape();
std::vector<int64_t> capiShape = getShapeAcceptableByCAPI(ovShape);
OVMS_DataType inputDataType = OVPrecision2CAPI(input_tensor.get_element_type());
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceRequestAddInput(request, realName, inputDataType, capiShape.data(), capiShape.size()));
const uint32_t NOT_USED_NUM = 0;
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceRequestInputSetData(request,
realName,
reinterpret_cast<void*>(input_tensor.data()),
input_tensor.get_byte_size(),
OVMS_BUFFERTYPE_CPU,
NOT_USED_NUM));
}
for (const auto& [name, output_tensor] : output) {
outputsSet.emplace_back(name);
const char* realName = name.c_str();
const auto& ovShape = output_tensor.get_shape();
std::vector<int64_t> capiShape = getShapeAcceptableByCAPI(ovShape);
OVMS_DataType inputDataType = OVPrecision2CAPI(output_tensor.get_element_type());
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceRequestAddOutput(request, realName, inputDataType, capiShape.data(), capiShape.size()));
const uint32_t NOT_USED_NUM = 0;
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceRequestOutputSetData(request,
realName,
reinterpret_cast<void*>(output_tensor.data()),
output_tensor.get_byte_size(),
OVMS_BUFFERTYPE_CPU,
NOT_USED_NUM));

}
#if (OVMS_DUMP_TO_FILE == 1)
dumpOvTensorInput(input,"input");
#endif
//////////////////
// INFERENCE
//////////////////
OVMS_InferenceResponse* response = nullptr;
ASSERT_CAPI_STATUS_NULL(OVMS_Inference(cserver, request, &response));
CREATE_GUARD(responseGuard, OVMS_InferenceResponse, response);
uint32_t outputCount = 42;
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceResponseOutputCount(response, &outputCount));
uint32_t parameterCount = 42;
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceResponseParameterCount(response, &parameterCount));
const void* voutputData;
size_t bytesize = 42;
OVMS_DataType datatype = (OVMS_DataType)199;
const int64_t* shape{nullptr};
size_t dimCount = 42;
OVMS_BufferType bufferType = (OVMS_BufferType)199;
uint32_t deviceId = 42;
const char* outputName{nullptr};
for (size_t i = 0; i < outputCount; ++i) {
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceResponseOutput(response, i, &outputName, &datatype, &shape, &dimCount, &voutputData, &bytesize, &bufferType, &deviceId));
if (std::find(outputsSet.begin(), outputsSet.end(), outputName) == outputsSet.end()) {
output.emplace(outputName, std::move(makeOvTensor(datatype, shape, dimCount, voutputData, bytesize)));
} else {
//output.emplace(outputName, input.at(outputName));
Review comments on the commented-out line above:
Collaborator: ?
Collaborator: does it mean that we only get selected outputs?
Collaborator: This line should probably be removed
}
std::vector<int64_t> inputShape{ovinputShape.begin(), ovinputShape.end()};
}
#if (OVMS_DUMP_TO_FILE == 1)
dumpOvTensorInput(output,"output");
#endif
}
InferenceOutput OVMSInferenceAdapter::infer(const InferenceInput& input) {
/////////////////////
// PREPARE REQUEST
/////////////////////
OVMS_InferenceRequest* request{nullptr};
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceRequestNew(&request, cserver, servableName.c_str(), servableVersion));
CREATE_GUARD(requestGuard, OVMS_InferenceRequest, request);

InferenceOutput output;
OVMS_Status* status{nullptr};
// PREPARE EACH INPUT
for (const auto& [name, input_tensor] : input) {
const char* realName = name.c_str();
const auto& ovShape = input_tensor.get_shape();
std::vector<int64_t> capiShape = getShapeAcceptableByCAPI(ovShape);
OVMS_DataType inputDataType = OVPrecision2CAPI(input_tensor.get_element_type());
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceRequestAddInput(request, realInputName, inputDataType, inputShape.data(), inputShape.size()));
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceRequestAddInput(request, realName, inputDataType, capiShape.data(), capiShape.size()));
const uint32_t NOT_USED_NUM = 0;
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceRequestInputSetData(request,
realInputName,
realName,
reinterpret_cast<void*>(input_tensor.data()),
input_tensor.get_byte_size(),
OVMS_BUFFERTYPE_CPU,
@@ -135,7 +213,7 @@ InferenceOutput OVMSInferenceAdapter::infer(const InferenceInput& input) {
const char* outputName{nullptr};
for (size_t i = 0; i < outputCount; ++i) {
ASSERT_CAPI_STATUS_NULL(OVMS_InferenceResponseOutput(response, i, &outputName, &datatype, &shape, &dimCount, &voutputData, &bytesize, &bufferType, &deviceId));
output[outputName] = makeOvTensor(datatype, shape, dimCount, voutputData, bytesize);
output.emplace(outputName, std::move(makeOvTensor(datatype, shape, dimCount, voutputData, bytesize)));
}
#if (OVMS_DUMP_TO_FILE == 1)
dumpOvTensorInput(output,"output");
@@ -169,16 +247,32 @@ void OVMSInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>& mod
inputMinMax.second.emplace_back(shapeMax[i]);
}
this->inShapesMinMaxes.insert({tensorName, std::move(inputMinMax)});
this->inputDatatypes.insert({tensorName, CAPI2OVPrecision(datatype)});
}
for (id = 0; id < outputCount; ++id) {
ASSERT_CAPI_STATUS_NULL(OVMS_ServableMetadataOutput(servableMetadata, id, &tensorName, &datatype, &dimCount, &shapeMin, &shapeMax));
outputNames.emplace_back(tensorName);
shape_min_max_t outputMinMax;
for (size_t i = 0; i < dimCount; ++i) {
outputMinMax.first.emplace_back(shapeMin[i]);
outputMinMax.second.emplace_back(shapeMax[i]);
}
this->outShapesMinMaxes.insert({tensorName, std::move(outputMinMax)});
this->outputDatatypes.insert({tensorName, CAPI2OVPrecision(datatype)});
}
const ov::AnyMap* servableMetadataRtInfo;
ASSERT_CAPI_STATUS_NULL(OVMS_ServableMetadataInfo(servableMetadata, reinterpret_cast<const void**>(&servableMetadataRtInfo)));
this->modelConfig = *servableMetadataRtInfo;
}

ov::element::Type_t OVMSInferenceAdapter::getInputDatatype(const std::string& inputName) const {
return inputDatatypes.at(inputName);
}

ov::element::Type_t OVMSInferenceAdapter::getOutputDatatype(const std::string& outputName) const {
return outputDatatypes.at(outputName);
}

ov::PartialShape OVMSInferenceAdapter::getInputShape(const std::string& inputName) const {
auto it = inShapesMinMaxes.find(inputName);
if (it == inShapesMinMaxes.end()) {
@@ -194,6 +288,21 @@ ov::PartialShape OVMSInferenceAdapter::getInputShape(const std::string& inputNam
}
return ovShape;
}
ov::PartialShape OVMSInferenceAdapter::getOutputShape(const std::string& outputName) const {
auto it = outShapesMinMaxes.find(outputName);
if (it == outShapesMinMaxes.end()) {
LOG(INFO) << "Could not find output:" << outputName;
throw std::runtime_error(std::string("Adapter could not find output:") + outputName);
}

ov::PartialShape ovShape;
const auto& [minBorder, maxBorder] = it->second;
ovShape.reserve(minBorder.size());
for (size_t i = 0; i < minBorder.size(); ++i) {
ovShape.emplace_back(ov::Dimension{minBorder[i], maxBorder[i]});
}
return ovShape;
}

std::vector<std::string> OVMSInferenceAdapter::getInputNames() const { return inputNames; }

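In the new overload above, outputs that the caller pre-registered are attached to the request with OVMS_InferenceRequestOutputSetData, so the server fills those buffers in place and the response loop only wraps the remaining outputs with makeOvTensor. Since getOutputShape reports per-dimension min/max bounds as an ov::PartialShape, a caller can check for a fully static shape before choosing the no-copy path; a minimal sketch of that decision, with the output name and include path as assumptions:

```cpp
// Sketch only: pick the no-copy overload when the reported output shape is
// fully static, otherwise fall back to the allocating infer(). The output
// name and include path are assumptions for illustration.
#include <string>

#include <openvino/openvino.hpp>

#include "adapters/inference_adapter.h"  // model_api InferenceAdapter interface (path may differ per setup)

InferenceOutput runWithOptionalPreallocation(InferenceAdapter& adapter,
                                             const InferenceInput& input) {
    const std::string outputName = "last_hidden_state";   // assumed output name
    const ov::PartialShape reported = adapter.getOutputShape(outputName);

    if (reported.is_static()) {
        // All min/max bounds coincide, so the buffer can be sized up front and
        // filled in place via the new infer(input, output) overload.
        InferenceOutput output;
        output.emplace(outputName,
                       ov::Tensor(adapter.getOutputDatatype(outputName),
                                  reported.get_shape()));
        adapter.infer(input, output);
        return output;
    }
    // Dynamic dimensions: use the original overload, which wraps the response
    // buffers in freshly created tensors.
    return adapter.infer(input);
}
```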
mediapipe/calculators/ovms/modelapiovmsadapter.hpp (7 changes: 7 additions & 0 deletions)
@@ -49,6 +49,9 @@ class OVMSInferenceAdapter : public ::InferenceAdapter {
std::vector<std::string> inputNames;
std::vector<std::string> outputNames;
shapes_min_max_t inShapesMinMaxes;
shapes_min_max_t outShapesMinMaxes;
std::unordered_map<std::string, ov::element::Type_t> inputDatatypes;
std::unordered_map<std::string, ov::element::Type_t> outputDatatypes;
ov::AnyMap modelConfig;

public:
@@ -64,6 +67,7 @@
}
virtual ~OVMSInferenceAdapter();
InferenceOutput infer(const InferenceInput& input) override;
void infer(const InferenceInput& input, InferenceOutput& output) override;
void loadModel(const std::shared_ptr<const ov::Model>& model, ov::Core& core,
const std::string& device, const ov::AnyMap& compilationConfig, size_t max_num_requests = 1) override;
void inferAsync(const InferenceInput& input, const CallbackData callback_args) override;
@@ -73,6 +77,9 @@
void awaitAny();
size_t getNumAsyncExecutors() const;
ov::PartialShape getInputShape(const std::string& inputName) const override;
ov::PartialShape getOutputShape(const std::string& outputName) const override;
ov::element::Type_t getInputDatatype(const std::string& inputName) const override;
ov::element::Type_t getOutputDatatype(const std::string& outputName) const override;
std::vector<std::string> getInputNames() const override;
std::vector<std::string> getOutputNames() const override;
const ov::AnyMap& getModelConfig() const override;
third_party/BUILD (8 changes: 8 additions & 0 deletions)
@@ -158,6 +158,14 @@ cmake(
name = "opencv_cmake",
# Values to be passed as -Dkey=value on the CMake command line;
# here are serving to provide some CMake script configuration options
build_args = [
"--verbose",
"--", # <- Pass remaining options to the native tool.
# https://github.com/bazelbuild/rules_foreign_cc/issues/329
# there is no elegant parallel compilation support
"VERBOSE=1",
"-j 32",
],
cache_entries = {
"CMAKE_BUILD_TYPE": "Release",
# The module list is always sorted alphabetically so that we do not
third_party/model_api/model_api.bzl (11 changes: 11 additions & 0 deletions)
@@ -14,6 +14,7 @@
# limitations under the License.
#

load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository")
def _is_windows(ctx):
return ctx.os.name.lower().find("windows") != -1

@@ -123,3 +124,13 @@ model_api_repository = repository_rule(
implementation = _impl,
local=False,
)

def workspace_model_api():
model_api_repository(name="_model-api")
new_git_repository(
name = "model_api",
remote = "https:///github.com/openvinotoolkit/model_api/",
build_file = "@_model-api//:BUILD",
commit = "25c88f8fd1ebe08447aca9a959a7a5f37751867e" # master 31th October 2024 Adjust cpp inference adapters for OVMS (#212)
)