From 23949d5d56bf9c82b2516acc77e8b751037186e9 Mon Sep 17 00:00:00 2001
From: "mei, yang"
Date: Tue, 19 Mar 2024 15:25:45 +0800
Subject: [PATCH] Enable Paddle FastSpeech2 model (#23311)

### Details:
 - *Enable Paddle FastSpeech2 model*
 - *Fix 'set_value': handle starts/ends/steps supplied independently (as tensor lists or attributes), map the unspecified-end sentinel (INT32_MAX) to the dimension size, and broadcast the update value explicitly*
 - *Add 'round' op (mapped to Round with HALF_AWAY_FROM_ZERO)*

### Tickets:
 - *CVS-134638*

---
 src/frontends/paddle/src/op/round.cpp         | 22 +++++++
 src/frontends/paddle/src/op/set_value.cpp     | 61 +++++++++++++------
 src/frontends/paddle/src/op_table.cpp         |  2 +
 src/frontends/paddle/tests/op_fuzzy.cpp       |  2 +
 .../test_models/gen_scripts/generate_round.py | 40 ++++++++++++
 .../gen_scripts/generate_set_value.py         | 18 +++++-
 6 files changed, 125 insertions(+), 20 deletions(-)
 create mode 100644 src/frontends/paddle/src/op/round.cpp
 create mode 100644 src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py

diff --git a/src/frontends/paddle/src/op/round.cpp b/src/frontends/paddle/src/op/round.cpp
new file mode 100644
index 00000000000000..f981fa1e841843
--- /dev/null
+++ b/src/frontends/paddle/src/op/round.cpp
@@ -0,0 +1,22 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "default_opset.hpp"
+#include "openvino/frontend/paddle/node_context.hpp"
+
+namespace ov {
+namespace frontend {
+namespace paddle {
+namespace op {
+NamedOutputs round(const NodeContext& node) {
+    return node.default_single_output_mapping(
+        {std::make_shared<default_opset::Round>(node.get_input("X"),
+                                                ov::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO)},
+        {"Out"});
+}
+
+}  // namespace op
+}  // namespace paddle
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/paddle/src/op/set_value.cpp b/src/frontends/paddle/src/op/set_value.cpp
index 63260b60da45c4..94c851479595ed 100644
--- a/src/frontends/paddle/src/op/set_value.cpp
+++ b/src/frontends/paddle/src/op/set_value.cpp
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include <limits>
+
 #include "default_opset.hpp"
 #include "openvino/frontend/paddle/node_context.hpp"
 
@@ -20,11 +22,11 @@ std::shared_ptr<Node> handle_minus_index(const std::vector<Output<Node>>& node, const
     return new_node;
 }
 
-// std::shared_ptr<Node> handle_maximum_index(Output<Node>& node, const Output<Node>& update_node) {
-//     const auto maximum_node = default_opset::Constant::create(element::i64, {1}, {INT_MAX});
-//     const auto mask = std::make_shared<default_opset::Equal>(node, maximum_node);
-//     return std::make_shared<default_opset::Select>(mask, update_node, node);
-// }
+std::shared_ptr<Node> handle_maximum_index(Output<Node>& node, const Output<Node>& update_node) {
+    const auto maximum_node = default_opset::Constant::create(element::i64, {1}, {std::numeric_limits<int>::max()});
+    const auto mask = std::make_shared<default_opset::Equal>(node, maximum_node);
+    return std::make_shared<default_opset::Select>(mask, update_node, node);
+}
 
 bool is_contain_minus(const std::vector<int64_t> vec) {
     for (int64_t i : vec) {
@@ -52,14 +54,14 @@ NamedOutputs set_value(const NodeContext& node) {
     //    Given:
     //        input_data: shape(5, 6, 7, 8, 9)
     //        update_value: shape(1, 6, 3, 3)
-    //        operation: input_data[:, :, 2: 7: 2, -4: -1] = update_value
+    //        operation: input_data[:, :, 2: 7: 2, -4: -1, :] = update_value
     //        axes = [2, 3]
     //        starts = [2, -4]
     //        ends = [7, -1]
     //        steps = [2, 1]
     //    Our process is:
     //    1. Get axes [2, 3], get shape of input [5, 6, 7, 8, 9], select dimension from shape by axes: [7, 8].
-    //    2. Get starts [2, -4] and ends [3, -1]. Process minus starts and ends. starts: [2, 4], ends: [7, 7].
+    //    2. Get starts [2, -4] and ends [7, -1]. Process minus starts and ends. starts: [2, 4], ends: [7, 7].
     //    3. Calculate starts_node, ends_node and steps_node
     //       1. Create `starts node` filled with 0. Update `starts` to `starts_node` according to axes.
     //          starts_node[axes[i]] = starts[i] for i in axes.size
@@ -92,25 +94,41 @@ NamedOutputs set_value(const NodeContext& node) {
     const auto slice_shape = default_opset::Constant::create(ov::element::i64, {1, 1}, {-1});
 
     // get positive starts ends and steps
-    if (node.has_input("StartsTensorList") && node.has_input("StepsTensorList") && node.has_input("EndsTensorList")) {
+    if (node.has_input("StartsTensorList")) {
         starts = handle_minus_index(node.get_ng_inputs("StartsTensorList"), spec_dim_node);
-        ends = handle_minus_index(node.get_ng_inputs("EndsTensorList"), spec_dim_node);
-        steps = std::make_shared<default_opset::Concat>(node.get_ng_inputs("StepsTensorList"), 0);
-    } else if (node.has_attribute("starts") && node.has_attribute("steps") && node.has_attribute("ends")) {
-        const auto start_vec = node.get_attribute<std::vector<int64_t>>("starts");
-        const auto ends_vec = node.get_attribute<std::vector<int64_t>>("ends");
-        const auto step_vec = node.get_attribute<std::vector<int64_t>>("steps");
-        if (is_contain_minus(start_vec) || is_contain_minus(ends_vec) || is_contain_minus(step_vec)) {
-            PADDLE_OP_CHECK(node, (false), "Currently not support minus start, ends and steps!");
+    } else if (node.has_attribute("starts")) {
+        auto start_vec = node.get_attribute<std::vector<int64_t>>("starts");
+        if (is_contain_minus(start_vec)) {
+            PADDLE_OP_CHECK(node, (false), "Currently not support minus start!");
         }
         starts = handle_minus_index(start_vec, spec_dim_node);
+    } else
+        PADDLE_OP_CHECK(node, (false), "Invalid arguments!");
+
+    if (node.has_input("EndsTensorList")) {
+        ends = handle_minus_index(node.get_ng_inputs("EndsTensorList"), spec_dim_node);
+    } else if (node.has_attribute("ends")) {
+        auto ends_vec = node.get_attribute<std::vector<int64_t>>("ends");
+        if (is_contain_minus(ends_vec)) {
+            PADDLE_OP_CHECK(node, (false), "Currently not support minus ends!");
+        }
         ends = handle_minus_index(ends_vec, spec_dim_node);
-        steps = default_opset::Constant::create(element::i64, {step_vec.size()}, step_vec);
+    } else
+        PADDLE_OP_CHECK(node, (false), "Invalid arguments!");
+
+    if (node.has_input("StepsTensorList")) {
+        steps = handle_minus_index(node.get_ng_inputs("StepsTensorList"), spec_dim_node);
+    } else if (node.has_attribute("steps")) {
+        auto step_vec = node.get_attribute<std::vector<int64_t>>("steps");
+        if (is_contain_minus(step_vec)) {
+            PADDLE_OP_CHECK(node, (false), "Currently not support minus steps!");
+        }
+        steps = handle_minus_index(step_vec, spec_dim_node);
     } else
         PADDLE_OP_CHECK(node, (false), "Invalid arguments!");
 
     // for unspecified end: x[::2], end will be 2147483647
-    // ends = handle_maximum_index(ends, spec_dim_node);
+    ends = handle_maximum_index(ends, spec_dim_node);
 
     // 3.1 get starts node
     starts_node =
@@ -142,7 +160,12 @@ NamedOutputs set_value(const NodeContext& node) {
         std::make_shared<default_opset::ScatterNDUpdate>(input_shape, axes_node, value_shape_update_node);
 
     // 4.5 broadcast
-    value_node = std::make_shared<default_opset::Broadcast>(value_node, value_target_shape);
+    auto value_shape = std::make_shared<default_opset::ShapeOf>(value_node);
+    auto value_rank = std::make_shared<default_opset::ShapeOf>(value_shape);
+    auto value_rank_scalar = std::make_shared<default_opset::Squeeze>(value_rank);
+    Output<Node> broadcast_axes =
+        std::make_shared<default_opset::Range>(zero_node, value_rank_scalar, one_node, element::i64);
+    value_node = std::make_shared<default_opset::Broadcast>(value_node, value_target_shape, broadcast_axes);
 
     // get total number of elements
     const auto numel_node = std::make_shared<default_opset::ReduceProd>(input_shape, zero_node);
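A note on the index normalization the hunks above implement: negative indices are shifted by the dimension size, and the unspecified-end sentinel is replaced by the dimension size. A minimal NumPy sketch of the intended semantics (hypothetical shapes, not part of the patch):

```python
import numpy as np

x = np.arange(7 * 8).reshape(7, 8)

# handle_minus_index: a negative start/end is shifted by the dimension size,
# so on axis 1 (size 8) start -4 becomes 4 and end -1 becomes 7.
assert np.array_equal(x[:, -4:-1], x[:, 4:7])

# handle_maximum_index: an omitted end arrives from Paddle as INT32_MAX
# (2147483647) and is replaced by the dimension size, so x[4:] behaves
# like x[4:8].
assert np.array_equal(x[4:2147483647:1], x[4:8:1])
```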
diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp
index c22441c72d85cd..3030f140996de2 100644
--- a/src/frontends/paddle/src/op_table.cpp
+++ b/src/frontends/paddle/src/op_table.cpp
@@ -98,6 +98,7 @@ OP_CONVERTER(reshape2);
 OP_CONVERTER(reverse);
 OP_CONVERTER(rnn);
 OP_CONVERTER(roi_align);
+OP_CONVERTER(round);
 OP_CONVERTER(scale);
 OP_CONVERTER(select_input);
 OP_CONVERTER(set_value);
@@ -233,6 +234,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
         {"reverse", op::reverse},
         {"rnn", op::rnn},
         {"roi_align", op::roi_align},
+        {"round", op::round},
         {"scale", op::scale},
         {"select_input", op::select_input},
         {"set_value", op::set_value},
diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp
index d1d9d913004a3a..ecc7fe619195b6 100644
--- a/src/frontends/paddle/tests/op_fuzzy.cpp
+++ b/src/frontends/paddle/tests/op_fuzzy.cpp
@@ -490,6 +490,7 @@ static const std::vector<std::string> models{
     std::string("rnn_lstm_layer_2_bidirectional_seq_len_4/rnn_lstm_layer_2_bidirectional_seq_len_4.pdmodel"),
     std::string("roi_align_test"),
    std::string("roi_align_test2"),
+    std::string("round"),
     std::string("scale_bias_after_float32"),
     std::string("scale_bias_after_int32"),
     std::string("scale_bias_after_int64"),
@@ -505,6 +506,7 @@ static const std::vector<std::string> models{
     std::string("set_value5"),
     // std::string("set_value6"),
     // std::string("set_value7"),
+    // std::string("set_value8"),
     // std::string("set_value_dynamic1"),
     std::string("set_value_dynamic2"),
     std::string("shape"),
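The fuzzy test above compares OpenVINO inference against the reference outputs saved by the generators below. Outside that harness, the generated model can also be smoke-tested directly with the OpenVINO Python API; a rough sketch, where the model path is hypothetical (the test resolves it from its own test_models directory):

```python
import numpy as np
from openvino import Core  # on older releases: from openvino.runtime import Core

core = Core()
model = core.read_model("test_models/round/round.pdmodel")  # path is an assumption
compiled = core.compile_model(model, "CPU")

x = np.random.uniform(-1000, 1000, (8, 24, 32)).astype(np.float32)
result = compiled([x])[compiled.output(0)]

# HALF_AWAY_FROM_ZERO reference, matching the converter's rounding mode.
expected = np.where(x >= 0, np.floor(x + 0.5), np.ceil(x - 0.5))
assert np.allclose(result, expected)
```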
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py
new file mode 100644
index 00000000000000..18d93ded10d7bf
--- /dev/null
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+#
+# round paddle model generator
+#
+import numpy as np
+from save_model import saveModel
+import paddle
+import sys
+
+data_type = 'float32'
+
+def paddle_round(name: str, x):
+    paddle.enable_static()
+
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        data = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
+        out = paddle.round(data)
+
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(paddle.static.default_startup_program())
+
+        outs = exe.run(
+            feed={'x': x},
+            fetch_list=[out])
+
+        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    return outs[0]
+
+def main():
+    x = np.random.uniform(-1000, 1000, (8, 24, 32)).astype(data_type)
+
+    paddle_round("round", x)
+
+if __name__ == "__main__":
+    main()
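One caveat when validating `round` by hand: `np.round` uses half-to-even (banker's rounding), while the converter maps `paddle.round` to `HALF_AWAY_FROM_ZERO`, so the two disagree exactly on .5 ties. This does not affect the generator above, since `uniform(-1000, 1000)` makes exact ties vanishingly unlikely. A small illustration:

```python
import numpy as np

ties = np.array([0.5, 1.5, 2.5, -0.5, -1.5], dtype=np.float32)

print(np.round(ties))  # [ 0.  2.  2. -0. -2.]  (half to even)

half_away = np.where(ties >= 0, np.floor(ties + 0.5), np.ceil(ties - 0.5))
print(half_away)       # [ 1.  2.  3. -1. -2.]  (half away from zero)
```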
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_set_value.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_set_value.py
index 4a9be9b3017e78..3230b09dffc3d3 100644
--- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_set_value.py
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_set_value.py
@@ -10,6 +10,7 @@
 import paddle
 from save_model import saveModel
 
+maxint32 = np.iinfo(np.int32).max
 
 def concat(data):
     data = [np.expand_dims(d, 0) for d in data]
@@ -141,6 +142,21 @@ def set_value5(x, value, *slice):
 
     # paddle_set_value("set_value7", data, value, set_value_step1, dtype)
 
+    shape = (7, 9)
+    dtype = "int32"
+    data = np.random.randint(0, 5, shape).astype(dtype)
+    value = np.random.randint(-100, -1, (3, 1)).astype(dtype)
+
+    starts = generate_data([4], np.int64)
+    ends = generate_data([maxint32], np.int64)
+    steps = generate_data([1], np.int64)
+
+    def set_value8(x, value, *slice):
+        x[build_slice(*slice)] = value
+        return x
+
+    paddle_set_value("set_value8", data, value, set_value8, dtype, starts, ends, steps)
+
     # shape = (10, 5)
     # dtype = "float32"
     # data = np.random.randint(0, 5, shape).astype(dtype)
@@ -167,4 +183,4 @@ def set_value7(x, value, *slice):
     paddle_set_value("set_value_dynamic2", data, value, set_value7, dtype, starts, ends, steps, is_dynamic=True)
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
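For completeness, a plain-NumPy rendering of what the new `set_value8` case exercises, using the same shapes as the generator (the fixed value -5 is illustrative; the generator draws random values):

```python
import numpy as np

maxint32 = np.iinfo(np.int32).max

data = np.zeros((7, 9), dtype=np.int32)
value = np.full((3, 1), -5, dtype=np.int32)

# ends=[maxint32] with starts=[4] and steps=[1] behaves like data[4:7:1]
# once handle_maximum_index maps the sentinel to the dimension size;
# the (3, 1) value broadcasts across the 9 columns.
data[4:maxint32:1] = value

assert np.array_equal(data[4:], np.broadcast_to(value, (3, 9)))
```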