diff --git a/source/api_cc/include/commonPD.h b/source/api_cc/include/commonPD.h deleted file mode 100644 index 952902225..000000000 --- a/source/api_cc/include/commonPD.h +++ /dev/null @@ -1,138 +0,0 @@ -// SPDX-License-Identifier: LGPL-3.0-or-later -#include -#include - -#include "paddle/include/paddle_inference_api.h" - -namespace deepmd { -/** - * @brief Check TensorFlow status. Exit if not OK. - * @param[in] status TensorFlow status. - **/ -// void check_status(const tensorflow::Status& status); - -/** - * @brief Get the value of a tensor. - * @param[in] session TensorFlow session. - * @param[in] name The name of the tensor. - * @param[in] scope The scope of the tensor. - * @return The value of the tensor. - **/ -template -VT predictor_get_scalar(const std::shared_ptr& predictor, - const std::string& name_); - -/** - * @brief Get the vector of a tensor. - * @param[out] o_vec The output vector. - * @param[in] session TensorFlow session. - * @param[in] name The name of the tensor. - * @param[in] scope The scope of the tensor. - **/ -// template -// void session_get_vector(std::vector& o_vec, -// tensorflow::Session* session, -// const std::string name_, -// const std::string scope = ""); - -/** - * @brief Get the type of a tensor. - * @param[in] session TensorFlow session. - * @param[in] name The name of the tensor. - * @param[in] scope The scope of the tensor. - * @return The type of the tensor as int. - **/ -paddle_infer::DataType predictor_get_dtype(const std::shared_ptr& predictor, - const std::string& name_); - -/** - * @brief Get input tensors. - * @param[out] input_tensors Input tensors. - * @param[in] dcoord_ Coordinates of atoms. - * @param[in] ntypes Number of atom types. - * @param[in] datype_ Atom types. - * @param[in] dbox Box matrix. - * @param[in] cell_size Cell size. - * @param[in] fparam_ Frame parameters. - * @param[in] aparam_ Atom parameters. - * @param[in] atommap Atom map. - * @param[in] scope The scope of the tensors. - * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is - * nall. - */ -template -int predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall = false); - -/** - * @brief Get input tensors. - * @param[out] input_tensors Input tensors. - * @param[in] dcoord_ Coordinates of atoms. - * @param[in] ntypes Number of atom types. - * @param[in] datype_ Atom types. - * @param[in] dlist Neighbor list. - * @param[in] fparam_ Frame parameters. - * @param[in] aparam_ Atom parameters. - * @param[in] atommap Atom map. - * @param[in] nghost Number of ghost atoms. - * @param[in] ago Update the internal neighbour list if ago is 0. - * @param[in] scope The scope of the tensors. - * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is - * nall. - */ -template -int predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - InputNlist& dlist, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const int nghost, - const int ago, - const bool aparam_nall = false); - -/** - * @brief Get input tensors for mixed type. - * @param[out] input_tensors Input tensors. - * @param[in] nframes Number of frames. 
- * @param[in] dcoord_ Coordinates of atoms. - * @param[in] ntypes Number of atom types. - * @param[in] datype_ Atom types. - * @param[in] dlist Neighbor list. - * @param[in] fparam_ Frame parameters. - * @param[in] aparam_ Atom parameters. - * @param[in] atommap Atom map. - * @param[in] nghost Number of ghost atoms. - * @param[in] ago Update the internal neighbour list if ago is 0. - * @param[in] scope The scope of the tensors. - * @param[in] aparam_nall Whether the atomic dimesion of atomic parameters is - * nall. - */ -template -int predictor_input_tensors_mixed_type( - const std::shared_ptr& predictor, - const int& nframes, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall = false); - -} // namespace deepmd diff --git a/source/api_cc/src/DeepPotPD.cc b/source/api_cc/src/DeepPotPD.cc index ed6a42381..63b436980 100644 --- a/source/api_cc/src/DeepPotPD.cc +++ b/source/api_cc/src/DeepPotPD.cc @@ -39,8 +39,11 @@ void DeepPotPD::init(const std::string& model, << std::endl; return; } + // NOTE: No custom operators need to be loaded for now. // deepmd::load_op_library(); - int gpu_num = 1; // Only support 1 GPU now. + + // NOTE: Only 1 GPU is supported now. + int gpu_num = 1; if (gpu_num > 0) { gpu_id = gpu_rank % gpu_num; } else { @@ -74,14 +77,13 @@ void DeepPotPD::init(const std::string& model, config->DisableGpu(); std::cout << "load model from: " << model << " to cpu " << std::endl; } else { - std::cout << "load model from: " << model << " to gpu " << gpu_id - << std::endl; + std::cout << "load model from: " << model << " to gpu " << gpu_id << std::endl; } + // NOTE: Both set to 1 now. // get_env_nthreads(num_intra_nthreads, // num_inter_nthreads); // need to be fixed as // // DP_INTRA_OP_PARALLELISM_THREADS - // both set to 1 now. // num_intra_nthreads = 1; num_inter_nthreads = 1; if (num_inter_nthreads) { @@ -90,7 +92,6 @@ void DeepPotPD::init(const std::string& model, predictor = paddle_infer::CreatePredictor(*config); - // initialize hyper params from model buffers ntypes_spin = 0; DeepPotPD::get_buffer("buffer_has_message_passing", do_message_passing); @@ -172,7 +173,7 @@ void DeepPotPD::compute(ENERGYVTYPE& ener, } if (do_message_passing == 1 && nghost == 0) { throw deepmd::deepmd_exception( - "do_message_passing == 1 && nghost == 0" + "(do_message_passing == 1 && nghost == 0) is not supported yet."
); } } diff --git a/source/api_cc/src/common.cc b/source/api_cc/src/common.cc index aef320456..1fb38fd29 100644 --- a/source/api_cc/src/common.cc +++ b/source/api_cc/src/common.cc @@ -34,12 +34,6 @@ using namespace tensorflow; #endif -#ifdef BUILD_PADDLE -#include "commonPD.h" -#include "google/protobuf/io/zero_copy_stream_impl.h" -#include "google/protobuf/text_format.h" -#endif - static std::vector split(const std::string& input_, const std::string& delimiter) { std::string input = input_; @@ -928,476 +922,6 @@ int deepmd::session_get_dtype(tensorflow::Session* session, } #endif -#ifdef BUILD_PADDLE -template -int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam__, - const deepmd::AtomMap& atommap, - const bool aparam_nall) { - // if datype.size is 0, not clear nframes; but 1 is just ok - int nframes = datype_.size() > 0 ? (dcoord_.size() / 3 / datype_.size()) : 1; - int nall = datype_.size(); - int nloc = nall; - assert(nall * 3 * nframes == dcoord_.size()); - bool b_pbc = (dbox.size() == nframes * 9); - - std::vector datype = atommap.get_type(); - std::vector type_count(ntypes, 0); - for (unsigned ii = 0; ii < datype.size(); ++ii) { - type_count[datype[ii]]++; - } - datype.insert(datype.end(), datype_.begin() + nloc, datype_.end()); - - // 准备输入Tensor句柄 - auto input_names = predictor->GetInputNames(); - auto coord_handle = predictor->GetInputHandle(input_names[0]); - auto type_handle = predictor->GetInputHandle(input_names[1]); - auto natoms_handle = predictor->GetInputHandle(input_names[2]); - auto box_handle = predictor->GetInputHandle(input_names[3]); - auto mesh_handle = predictor->GetInputHandle(input_names[4]); - - // 设置输入 Tensor 的维度信息 - std::vector coord_shape = {nframes, nall * 3}; - std::vector atype_shape = {nframes, nall}; - std::vector box_shape = {nframes, 9}; - std::vector mesh_shape; - if (b_pbc) { - mesh_shape = std::vector({6}); - } else { - mesh_shape = std::vector({0}); - } - - std::vector natoms_shape = {2 + ntypes}; - - coord_handle->Reshape(coord_shape); - type_handle->Reshape(atype_shape); - natoms_handle->Reshape(natoms_shape); - box_handle->Reshape(box_shape); - mesh_handle->Reshape(mesh_shape); - - paddle_infer::DataType model_type; - if (std::is_same::value) { - model_type = paddle_infer::DataType::FLOAT64; - } else if (std::is_same::value) { - model_type = paddle_infer::DataType::FLOAT32; - } else { - throw deepmd::deepmd_exception("unsupported data type"); - } - - std::vector dcoord(dcoord_); - atommap.forward(dcoord.begin(), dcoord_.begin(), 3, nframes, nall); - std::vector aparam_(aparam__); - if ((aparam_nall ? nall : nloc) > 0) { - atommap.forward( - aparam_.begin(), aparam__.begin(), - aparam__.size() / nframes / (aparam_nall ? nall : nloc), nframes, - (aparam_nall ? 
nall : nloc)); - } - - // 发送输入数据到Tensor句柄 - coord_handle->CopyFromCpu(dcoord.data()); - if (b_pbc) { - box_handle->CopyFromCpu(dbox.data()); - } else { - std::vector zero = dbox; - std::fill(zero.begin(), zero.end(), 0); - box_handle->CopyFromCpu(zero.data()); - } - std::vector datype_rep(nframes * nall, 0); - for (int ii = 0; ii < nframes; ++ii) { - for (int jj = 0; jj < nall; ++jj) { - datype_rep[ii * nall + jj] = datype[jj]; - } - } - type_handle->CopyFromCpu(datype_rep.data()); - std::vector mesh; - if (b_pbc) { - mesh = std::vector(6); - mesh[1 - 1] = 0; - mesh[2 - 1] = 0; - mesh[3 - 1] = 0; - mesh[4 - 1] = 0; - mesh[5 - 1] = 0; - mesh[6 - 1] = 0; - } else { - mesh = std::vector(0); - } - mesh_handle->CopyFromCpu(mesh.data()); - std::vector natoms = {nloc, nall}; - for (int ii = 0; ii < ntypes; ++ii) { - natoms.push_back(type_count[ii]); - } - natoms_handle->CopyFromCpu(natoms.data()); - - return nloc; -} - -template -int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - InputNlist& dlist, - const std::vector& fparam_, - const std::vector& aparam__, - const deepmd::AtomMap& atommap, - const int nghost, - const int ago, - const bool aparam_nall) { - // if datype.size is 0, not clear nframes; but 1 is just ok - int nframes = datype_.size() > 0 ? (dcoord_.size() / 3 / datype_.size()) : 1; - int nall = datype_.size(); - int nloc = nall - nghost; - assert(nall * 3 * nframes == dcoord_.size()); - assert(dbox.size() == nframes * 9); - - std::vector datype = atommap.get_type(); - // for (int i=0; i type_count(ntypes, 0); - for (unsigned ii = 0; ii < datype.size(); ++ii) { - type_count[datype[ii]]++; - } - datype.insert(datype.end(), datype_.begin() + nloc, datype_.end()); - - // 准备输入Tensor句柄 - auto input_names = predictor->GetInputNames(); - // for (auto &ss: input_names) - // { - // std::cout << "input_name: " << " " << ss << std::endl; - // } - auto coord_handle = predictor->GetInputHandle(input_names[0]); - auto type_handle = predictor->GetInputHandle(input_names[1]); - // auto natoms_handle = predictor->GetInputHandle(input_names[2]); - auto box_handle = predictor->GetInputHandle(input_names[2]); - // auto mesh_handle = predictor->GetInputHandle(input_names[4]); - - // 设置输入 Tensor 的维度信息 - std::vector coord_shape = {nframes, nall, 3}; - std::vector coord_shape_flat = {nframes, nall * 3}; - - std::vector atype_shape = {nframes, nall}; - std::vector atype_shape_flat = {nframes, nall}; - - std::vector box_shape = {nframes, 3, 3}; - std::vector box_shape_flat = {nframes * 9}; - // std::vector mesh_shape = std::vector({16}); - // std::vector natoms_shape = {2 + ntypes}; - - paddle_infer::DataType model_type; - if (std::is_same::value) { - model_type = paddle_infer::DataType::FLOAT64; - } else if (std::is_same::value) { - model_type = paddle_infer::DataType::FLOAT32; - } else { - throw deepmd::deepmd_exception("unsupported data type"); - } - - coord_handle->Reshape(coord_shape_flat); - box_handle->Reshape(box_shape_flat); - type_handle->Reshape(atype_shape_flat); - // printf("coord.shape = ["); - // for (auto &d: coord_shape) - // { - // printf("%d, ", d); - // } - // printf("]\n"); - - // printf("type.shape = ["); - // for (auto &d: atype_shape) - // { - // printf("%d, ", d); - // } - // printf("]\n"); - - // printf("box.shape = ["); - // for (auto &d: box_shape) - // { - // printf("%d, ", d); - // } - // printf("]\n"); - // mesh_handle->Reshape(mesh_shape); - // 
natoms_handle->Reshape(natoms_shape); - - std::vector dcoord(dcoord_); - atommap.forward(dcoord.begin(), dcoord_.begin(), 3, nframes, nall); //012 - std::vector aparam_(aparam__); - if ((aparam_nall ? nall : nloc) > 0) { - atommap.forward( - aparam_.begin(), aparam__.begin(), - aparam__.size() / nframes / (aparam_nall ? nall : nloc), nframes, - (aparam_nall ? nall : nloc)); - } - - // const std::string filename = "/workspace/hesensen/deepmd_backend/deepmd_paddle_new/examples/water/lmp/coord_torch.log"; - // std::ifstream inputFile(filename); - // VALUETYPE number; - // int iii = 0; - // while (inputFile >> number) { - // dcoord[iii] = number; - // ++iii; - // } - // printf("dcoord finished, iii = %d\n", iii); - // inputFile.close(); - - // 发送输入数据到Tensor句柄 - coord_handle->CopyFromCpu(dcoord.data()); - coord_handle->Reshape(coord_shape); - box_handle->CopyFromCpu(dbox.data()); - box_handle->Reshape(box_shape); - // for (int i = 0; i < dcoord.size(); ++i) - // { - // printf("dcoord[%d] = %.6lf\n", i, dcoord[i]); - // } - std::vector datype_rep(nframes * nall, 0); - for (int ii = 0; ii < nframes; ++ii) { - for (int jj = 0; jj < nall; ++jj) { - datype_rep[ii * nall + jj] = datype[jj]; - } - } - // const std::string filename1 = "/workspace/hesensen/deepmd_backend/deepmd_paddle_new/examples/water/lmp/type_torch.log"; - // std::ifstream inputFile1(filename1); - // int number_int; - // iii = 0; - // while (inputFile1 >> number_int) { - // datype_rep[iii] = number_int; - // ++iii; - // } - // printf("atype finishied, iii = %d\n", iii); - // inputFile1.close(); - - type_handle->CopyFromCpu(datype_rep.data()); - // for (int i = 0; i < datype_rep.size(); ++i) - // { - // printf("%d\n", datype_rep[i]); - // } - type_handle->Reshape(atype_shape); - // std::vector mesh(mesh_shape[0], 0); - // for (int ii = 0; ii < 16; ++ii) { - // mesh[ii] = 0; - // } - // const int stride = sizeof(int*) / sizeof(int); - // assert(stride * sizeof(int) == sizeof(int*)); - // assert(stride <= 4); - // mesh[0] = ago; - // mesh[1] = dlist.inum; - // mesh[2] = 0; - // mesh[3] = 0; - // memcpy(&mesh[4], &(dlist.ilist), sizeof(int*)); - // memcpy(&mesh[8], &(dlist.numneigh), sizeof(int*)); - // memcpy(&mesh[12], &(dlist.firstneigh), sizeof(int**)); - // mesh_handle->CopyFromCpu(mesh.data()); - - // std::vector natoms = {nloc, nall}; - // for (int ii = 0; ii < ntypes; ++ii) { - // natoms.push_back(type_count[ii]); - // } - // natoms_handle->CopyFromCpu(natoms.data()); - // printf("finished predictor_input_tensors\n"); - // printf("nloc = %d\n", nloc); - return nloc; -} - -template -int deepmd::predictor_input_tensors_mixed_type( - const std::shared_ptr& predictor, - const int& nframes, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam__, - const deepmd::AtomMap& atommap, - const bool aparam_nall) { - int nall = datype_.size() / nframes; - int nloc = nall; - assert(nall * 3 * nframes == dcoord_.size()); - bool b_pbc = (dbox.size() == nframes * 9); - - std::vector datype(datype_); - atommap.forward(datype.begin(), datype_.begin(), 1, nframes, nall); - - auto input_names = predictor->GetInputNames(); - auto coord_handle = predictor->GetInputHandle(input_names[0]); - auto type_handle = predictor->GetInputHandle(input_names[1]); - auto box_handle = predictor->GetInputHandle(input_names[3]); - auto mesh_handle = predictor->GetInputHandle(input_names[4]); - auto natoms_handle = 
predictor->GetInputHandle(input_names[2]); - - // 设置输入 Tensor 的维度信息 - std::vector coord_shape = {nframes, nall * 3}; - std::vector atype_shape = {nframes, nall}; - std::vector box_shape = {nframes, 9}; - std::vector mesh_shape; - if (b_pbc) { - mesh_shape = std::vector({7}); - } else { - mesh_shape = std::vector({1}); - } - std::vector natoms_shape = {2 + ntypes}; - - coord_handle->Reshape(coord_shape); - type_handle->Reshape(atype_shape); - box_handle->Reshape(box_shape); - mesh_handle->Reshape(mesh_shape); - natoms_handle->Reshape(natoms_shape); - - paddle_infer::DataType model_type; - if (std::is_same::value) { - model_type = paddle_infer::DataType::FLOAT64; - } else if (std::is_same::value) { - model_type = paddle_infer::DataType::FLOAT32; - } else { - throw deepmd::deepmd_exception("unsupported data type"); - } - - std::vector dcoord(dcoord_); - atommap.forward(dcoord.begin(), dcoord_.begin(), 3, nframes, nall); - std::vector aparam_(aparam__); - if ((aparam_nall ? nall : nloc) > 0) { - atommap.forward( - aparam_.begin(), aparam__.begin(), - aparam__.size() / nframes / (aparam_nall ? nall : nloc), nframes, - (aparam_nall ? nall : nloc)); - } - // coord - coord_handle->CopyFromCpu(dcoord.data()); - - // box - if (b_pbc) { - box_handle->CopyFromCpu(dbox.data()); - } else { - std::vector zero = dbox; - std::fill(zero.begin(), zero.end(), 0); - box_handle->CopyFromCpu(zero.data()); - } - - // datype - std::vector datype_rep(nframes * nall, 0); - for (int ii = 0; ii < nframes; ++ii) { - for (int jj = 0; jj < nall; ++jj) { - datype_rep[ii * nall + jj] = datype[jj]; - } - } - type_handle->CopyFromCpu(datype_rep.data()); - // mesh - std::vector mesh; - if (b_pbc) { - mesh = std::vector(7, 0); - mesh[1 - 1] = 0; - mesh[2 - 1] = 0; - mesh[3 - 1] = 0; - mesh[4 - 1] = 0; - mesh[5 - 1] = 0; - mesh[6 - 1] = 0; - mesh[7 - 1] = 0; - } else { - mesh = std::vector(1, 0); - mesh[1 - 1] = 0; - } - mesh_handle->CopyFromCpu(mesh.data()); - //natoms - std::vector natoms_pad = {nloc, nall, nall}; - if (ntypes > 1) { - for (int ii = 0; ii < ntypes; ++ii) { - natoms_pad.push_back(0); - } - } - natoms_handle->CopyFromCpu(natoms_pad.data()); - - // if (fparam_.size() > 0) { - // input_tensors.push_back({prefix + "t_fparam", fparam_tensor}); - // } - // if (aparam_.size() > 0) { - // input_tensors.push_back({prefix + "t_aparam", aparam_tensor}); - // } - return nloc; -} - -#endif - -#ifdef BUILD_PADDLE -template -VT deepmd::predictor_get_scalar( - const std::shared_ptr& predictor, - const std::string& name_) { - if (std::is_same::value) { - /* - NOTE: Convert from ascii code(int64) to std::string, - A workaround for string data type is not supported in Paddle yet. 
- */ - auto scalar_tensor = predictor->GetOutputHandle(name_); - if (scalar_tensor->shape().size() == 0) { - return VT(); - } - const auto& shape = scalar_tensor->shape(); - const int& str_len = std::accumulate(std::begin(shape), std::end(shape), 1, - std::multiplies<>{}); - if (str_len == 0) { - return VT(); - } - int32_t* scalar_ptr = (int32_t*)malloc(str_len * sizeof(int32_t)); - scalar_tensor->CopyToCpu(scalar_ptr); - VT ret; - for (int ii = 0; ii < str_len; ++ii) { - ret += (char)scalar_ptr[ii]; - } - free(scalar_ptr); - return ret; - } else { - /* Vanillia process for other data type below*/ - auto scalar_tensor = predictor->GetOutputHandle(name_); - // VT* scalar_ptr = (VT*)malloc(1 * sizeof(VT)); - std::unique_ptr scalar_ptr(new VT); - scalar_tensor->CopyToCpu(scalar_ptr.get()); - return (*scalar_ptr); - } -} - - -// template -// void deepmd::session_get_vector(std::vector& o_vec, -// Session* session, -// const std::string name_, -// const std::string scope) { -// std::string name = name_; -// if (scope != "") { -// name = scope + "/" + name; -// } -// std::vector output_tensors; -// deepmd::check_status( -// session->Run(std::vector>({}), -// {name.c_str()}, {}, &output_tensors)); -// Tensor output_rc = output_tensors[0]; -// assert(1 == output_rc.shape().dims()); -// int dof = output_rc.shape().dim_size(0); -// o_vec.resize(dof); -// auto orc = output_rc.flat(); -// for (int ii = 0; ii < dof; ++ii) { -// o_vec[ii] = orc(ii); -// } -// } - -paddle_infer::DataType deepmd::predictor_get_dtype( - const std::shared_ptr& predictor, - const std::string& name_) { - auto scalar_tensor = predictor->GetOutputHandle(name_); - return scalar_tensor->type(); -} - -#endif - template void deepmd::select_map(std::vector& out, const std::vector& in, @@ -1502,19 +1026,6 @@ template void deepmd::session_get_vector(std::vector&, const std::string); #endif -#ifdef BUILD_PADDLE -template int deepmd::predictor_get_scalar(const std::shared_ptr& predictor, - const std::string &name_); - -template bool deepmd::predictor_get_scalar(const std::shared_ptr& predictor, - const std::string &name_); - -// template void deepmd::session_get_vector(std::vector&, -// Session*, -// const std::string, -// const std::string); -#endif - template void deepmd::select_map(std::vector& out, const std::vector& in, const std::vector& idx_map, @@ -1554,12 +1065,6 @@ template void deepmd::session_get_vector(std::vector&, const std::string); #endif -#ifdef BUILD_PADDLE -template float deepmd::predictor_get_scalar(const std::shared_ptr& predictor, - const std::string &name_); - -#endif - template void deepmd::select_map(std::vector& out, const std::vector& in, const std::vector& idx_map, @@ -1599,11 +1104,6 @@ template void deepmd::session_get_vector(std::vector&, const std::string); #endif -#ifdef BUILD_PADDLE -template double deepmd::predictor_get_scalar(const std::shared_ptr& predictor, - const std::string& name_); -#endif - template void deepmd::select_map(std::vector& out, const std::vector& in, const std::vector& idx_map, @@ -1673,46 +1173,6 @@ template void deepmd::select_map_inv( const int& stride); #endif -#ifdef BUILD_PADDLE -template std::string deepmd::predictor_get_scalar( - const std::shared_ptr& predictor, const std::string &name_); - -// template void deepmd::session_get_vector( -// std::vector&, -// const std::shared_ptr& predictor, -// const std::string); - -template void deepmd::select_map( - std::vector& out, - const std::vector& in, - const std::vector& idx_map, - const int& stride, - const int& nframes, - 
const int& nall1, - const int& nall2); - -template void deepmd::select_map( - typename std::vector::iterator out, - const typename std::vector::const_iterator in, - const std::vector& idx_map, - const int& stride, - const int& nframes, - const int& nall1, - const int& nall2); - -template void deepmd::select_map_inv( - std::vector& out, - const std::vector& in, - const std::vector& idx_map, - const int& stride); - -template void deepmd::select_map_inv( - typename std::vector::iterator out, - const typename std::vector::const_iterator in, - const std::vector& idx_map, - const int& stride); -#endif - void deepmd::read_file_to_string(std::string model, std::string& file_content) { // generated by GitHub Copilot std::ifstream file(model); @@ -1907,158 +1367,6 @@ template int deepmd::session_input_tensors_mixed_type( const bool aparam_nall); #endif -#ifdef BUILD_PADDLE -template int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall); -template int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall); - -template int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall); -template int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall); - -template int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - InputNlist& dlist, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const int nghost, - const int ago, - const bool aparam_nall); -template int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - InputNlist& dlist, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const int nghost, - const int ago, - const bool aparam_nall); - -template int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - InputNlist& dlist, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const int nghost, - const int ago, - const bool aparam_nall); -template int deepmd::predictor_input_tensors( - const std::shared_ptr& predictor, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - InputNlist& 
dlist, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const int nghost, - const int ago, - const bool aparam_nall); - -template int deepmd::predictor_input_tensors_mixed_type( - const std::shared_ptr& predictor, - const int& nframes, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall); -template int deepmd::predictor_input_tensors_mixed_type( - const std::shared_ptr& predictor, - const int& nframes, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall); - -template int deepmd::predictor_input_tensors_mixed_type( - const std::shared_ptr& predictor, - const int& nframes, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall); -template int deepmd::predictor_input_tensors_mixed_type( - const std::shared_ptr& predictor, - const int& nframes, - const std::vector& dcoord_, - const int& ntypes, - const std::vector& datype_, - const std::vector& dbox, - const double& cell_size, - const std::vector& fparam_, - const std::vector& aparam_, - const deepmd::AtomMap& atommap, - const bool aparam_nall); -#endif - void deepmd::print_summary(const std::string& pre) { int num_intra_nthreads, num_inter_nthreads; deepmd::get_env_nthreads(num_intra_nthreads, num_inter_nthreads);
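With commonPD.h and the predictor_input_tensors* / predictor_get_scalar helpers removed, the Paddle backend now drives the predictor directly from DeepPotPD.cc. For readers unfamiliar with that flow, the sketch below shows the plain Paddle Inference feed/run/fetch pattern the deleted wrappers used to encapsulate. It is illustrative only: the model/parameter file names, the input ordering (coord, atype, box), the shapes, and the double-precision output are assumptions made for the example, not the exact tensors DeepPotPD binds.

// Minimal sketch of the Paddle Inference C++ feed/run/fetch cycle.
// File names and input ordering are hypothetical.
#include <functional>
#include <memory>
#include <numeric>
#include <vector>

#include "paddle/include/paddle_inference_api.h"

int main() {
  // Configure and create the predictor (CPU here; use EnableUseGpu(mem_mb, id) for GPU).
  paddle_infer::Config config;
  config.SetModel("model.pdmodel", "model.pdiparams");  // hypothetical paths
  config.DisableGpu();
  std::shared_ptr<paddle_infer::Predictor> predictor =
      paddle_infer::CreatePredictor(config);

  // Feed: one frame of 3 atoms; assumed input order is coord, atype, box.
  const int nframes = 1, nall = 3;
  std::vector<double> dcoord(nframes * nall * 3, 0.0);
  std::vector<int> datype(nframes * nall, 0);
  std::vector<double> dbox(nframes * 9, 0.0);

  auto input_names = predictor->GetInputNames();
  auto coord = predictor->GetInputHandle(input_names[0]);
  coord->Reshape({nframes, nall, 3});
  coord->CopyFromCpu(dcoord.data());
  auto atype = predictor->GetInputHandle(input_names[1]);
  atype->Reshape({nframes, nall});
  atype->CopyFromCpu(datype.data());
  auto box = predictor->GetInputHandle(input_names[2]);
  box->Reshape({nframes, 3, 3});
  box->CopyFromCpu(dbox.data());

  // Run the graph.
  predictor->Run();

  // Fetch: copy the first output back to host memory.
  auto output_names = predictor->GetOutputNames();
  auto out = predictor->GetOutputHandle(output_names[0]);
  auto shape = out->shape();
  int numel =
      std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
  std::vector<double> result(numel);
  out->CopyToCpu(result.data());
  return 0;
}

Keeping this pattern inline in DeepPotPD.cc removes the extra indirection of the deleted helpers, whose documentation still referred to TensorFlow sessions and which carried large blocks of commented-out debug code.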