Commit 569a0cb

fix OPENVINO_DEBUG
CuriousPanCake committed Oct 1, 2024
1 parent 5db9413 · commit 569a0cb
Showing 2 changed files with 7 additions and 7 deletions.
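
For context: OpenVINO's `OPENVINO_DEBUG` logging macro moved from a stream-style interface (`OPENVINO_DEBUG << ...`) to a function-call style (`OPENVINO_DEBUG(...)`), which presumably left the plugin's old stream-style call sites broken; this commit migrates them. Below is a minimal sketch of the two call styles. The stand-in names (`OPENVINO_DEBUG_OLD_STYLE`, `OPENVINO_DEBUG_NEW_STYLE`, `log_debug`) are illustrative only, not OpenVINO's actual definitions, which live in its utility headers:

```cpp
#include <iostream>

// Hypothetical log sink used by the new-style macro below.
static void log_debug(const char* msg) {
    std::cerr << "[ DEBUG ] " << msg << '\n';
}

// Old style (what the plugin used before): the macro expands to a stream,
// so call sites chain arguments with operator<<.
#define OPENVINO_DEBUG_OLD_STYLE std::cerr << "[ DEBUG ] "

// New style (what this commit migrates to): the macro takes the message
// as an argument, like a function call.
#define OPENVINO_DEBUG_NEW_STYLE(msg) log_debug(msg)

int main() {
    OPENVINO_DEBUG_OLD_STYLE << "loading model" << std::endl;  // pre-commit call-site shape
    OPENVINO_DEBUG_NEW_STYLE("loading model");                 // post-commit call-site shape
    return 0;
}
```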
6 changes: 3 additions & 3 deletions modules/llama_cpp_plugin/src/compiled_model.cpp
@@ -27,11 +27,11 @@ LlamaCppModel::LlamaCppModel(const std::string& gguf_fname,
     : ICompiledModel(nullptr, plugin),
       m_gguf_fname(gguf_fname),
       m_num_threads(num_threads) {
-    OPENVINO_DEBUG << "llama_cpp_plugin: loading llama model directly from GGUF... " << std::endl;
+    OPENVINO_DEBUG("llama_cpp_plugin: loading llama model directly from GGUF... ");
     llama_model_params mparams = llama_model_default_params();
     mparams.n_gpu_layers = 99;
     m_llama_model_ptr = llama_load_model_from_file(gguf_fname.c_str(), mparams);
-    OPENVINO_DEBUG << "llama_cpp_plugin: llama model loaded successfully from GGUF..." << std::endl;
+    OPENVINO_DEBUG("llama_cpp_plugin: llama model loaded successfully from GGUF...");
 
     auto input_ids = std::make_shared<ov::opset13::Parameter>(ov::element::Type_t::i64, ov::PartialShape({-1, -1}));
     auto fake_convert = std::make_shared<ov::opset13::Convert>(input_ids->output(0), ov::element::Type_t::f32);
@@ -71,7 +71,7 @@ std::shared_ptr<const ov::Model> LlamaCppModel::get_runtime_model() const {
 }
 
 void LlamaCppModel::set_property(const ov::AnyMap& properties) {
-    OPENVINO_DEBUG << "llama_cpp_plugin: attempted to set_property (did nothing)";
+    OPENVINO_DEBUG("llama_cpp_plugin: attempted to set_property (did nothing)");
 }
 
 ov::Any LlamaCppModel::get_property(const std::string& name) const {
8 changes: 4 additions & 4 deletions modules/llama_cpp_plugin/src/infer_request.cpp
@@ -28,7 +28,7 @@ void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor,
 LlamaCppSyncInferRequest::LlamaCppSyncInferRequest(const std::shared_ptr<const LlamaCppModel>& compiled_model,
                                                    size_t num_threads)
     : ov::ISyncInferRequest(compiled_model) {
-    OPENVINO_DEBUG << "llama_cpp_plugin: infer request ctor called\n";
+    OPENVINO_DEBUG("llama_cpp_plugin: infer request ctor called");
     llama_context_params cparams = llama_context_default_params();
     cparams.n_threads = num_threads ? num_threads : std::thread::hardware_concurrency();
     cparams.n_ctx = 0;  // this means that the actual n_ctx will be taken equal to the model's train-time value
@@ -51,7 +51,7 @@ LlamaCppSyncInferRequest::LlamaCppSyncInferRequest(const std::shared_ptr<const L
 }
 void LlamaCppSyncInferRequest::set_tensors_impl(const ov::Output<const ov::Node> port,
                                                 const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
-    OPENVINO_DEBUG << "llama_cpp_plugin: set_tensors_impl called\n";
+    OPENVINO_DEBUG("llama_cpp_plugin: set_tensors_impl called");
 }
 
 void llama_batch_add_reimpl(struct llama_batch& batch,
@@ -131,12 +131,12 @@ void LlamaCppSyncInferRequest::infer() {
     llama_batch_free(batch);
 };
 std::vector<ov::ProfilingInfo> LlamaCppSyncInferRequest::get_profiling_info() const {
-    OPENVINO_DEBUG << "llama_cpp_plugin: get_profiling_info() called\n";
+    OPENVINO_DEBUG("llama_cpp_plugin: get_profiling_info() called");
     return std::vector<ov::ProfilingInfo>{};
 };
 
 std::vector<ov::SoPtr<ov::IVariableState>> LlamaCppSyncInferRequest::query_state() const {
-    OPENVINO_DEBUG << "llama_cpp_plugin: query_state() called\n";
+    OPENVINO_DEBUG("llama_cpp_plugin: query_state() called");
     return {std::static_pointer_cast<ov::IVariableState>(std::make_shared<LlamaCppState>(m_llama_ctx))};
 }
